#!/usr/bin/perl

# (C) Maxim Dounin

# Test for memcached with keepalive.

###############################################################################

use warnings;
use strict;

use Test::More;

BEGIN { use FindBin; chdir($FindBin::Bin); }

use lib 'lib';
use Test::Nginx;

###############################################################################

select STDERR; $| = 1;
select STDOUT; $| = 1;

eval { require Cache::Memcached; };
plan(skip_all => 'Cache::Memcached not installed') if $@;

my $t = Test::Nginx->new()->has(qw/http memcached upstream_keepalive rewrite/)
	->has_daemon('memcached')->plan(15)
	->write_file_expand('nginx.conf', <<'EOF');

%%TEST_GLOBALS%%

daemon off;

events {
}

http {
    %%TEST_GLOBALS_HTTP%%

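    # single backend with at most one cached keepalive connection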
    upstream memd {
        server 127.0.0.1:8081;
        keepalive 1;
    }

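    # two backends sharing a single cached connection, so each
    # request is expected to open a new upstream connection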
    upstream memd3 {
        server 127.0.0.1:8081;
        server 127.0.0.1:8082;
        keepalive 1;
    }

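    # two backends with enough cached connections for both, so only
    # one connection per backend is expected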
    upstream memd4 {
        server 127.0.0.1:8081;
        server 127.0.0.1:8082;
        keepalive 10;
    }

    server {
        listen       127.0.0.1:8080;
        server_name  localhost;

        location / {
            set $memcached_key $uri;
            memcached_pass memd;
        }

        location /next {
            set $memcached_key $uri;
            memcached_next_upstream  not_found;
            memcached_pass memd;
        }

        location /memd3 {
            set $memcached_key "/";
            memcached_pass memd3;
        }

        location /memd4 {
            set $memcached_key "/";
            memcached_pass memd4;
        }
    }
}

EOF

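# probe memcached capabilities via its help output to build compatible options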
my $memhelp = `memcached -h`;
my @memopts1 = ();
my @memopts2 = ();

if ($memhelp =~ /repcached/) {
	# the repcached patch adds an additional listen socket to memcached
	# that should be different too

	push @memopts1, '-X', port(8083);
	push @memopts2, '-X', port(8084);
}
if ($memhelp =~ /-U/) {
	# UDP ports are no longer off by default in memcached 1.2.7+

	push @memopts1, '-U', '0';
	push @memopts2, '-U', '0';
}
if ($memhelp =~ /-t/) {
	# use a single thread for connection stats consistency
	# in threaded memcached 1.3+

	push @memopts1, '-t', '1';
	push @memopts2, '-t', '1';
}

$t->run_daemon('memcached', '-l', '127.0.0.1', '-p', port(8081), @memopts1);
$t->run_daemon('memcached', '-l', '127.0.0.1', '-p', port(8082), @memopts2);

$t->run();

$t->waitforsocket('127.0.0.1:' . port(8081))
	or die "Unable to start memcached";
$t->waitforsocket('127.0.0.1:' . port(8082))
	or die "Unable to start second memcached";

###############################################################################

my $memd1 = Cache::Memcached->new(servers => [ '127.0.0.1:' . port(8081) ],
	connect_timeout => 1.0);
my $memd2 = Cache::Memcached->new(servers => [ '127.0.0.1:' . port(8082) ],
	connect_timeout => 1.0);

$memd1->set('/', 'SEE-THIS');
$memd2->set('/', 'SEE-THIS');
$memd1->set('/big', 'X' x 1000000);

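# remember the connection counter; the following requests are expected
# to reuse a single cached upstream connection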
my $total = $memd1->stats()->{total}->{total_connections};

like(http_get('/'), qr/SEE-THIS/, 'keepalive memcached request');
like(http_get('/notfound'), qr/ 404 /, 'keepalive memcached not found');
like(http_get('/next'), qr/ 404 /,
	'keepalive not found with memcached_next_upstream');
like(http_get('/'), qr/SEE-THIS/, 'keepalive memcached request again');
like(http_get('/'), qr/SEE-THIS/, 'keepalive memcached request again');
like(http_get('/'), qr/SEE-THIS/, 'keepalive memcached request again');

is($memd1->stats()->{total}->{total_connections}, $total + 1,
	'only one connection used');

# Since nginx doesn't read all data from the connection in some situations
# (HEAD requests, post_action, errors while writing to the client), such
# connections have to be closed.  Check that we really do close them.

$total = $memd1->stats()->{total}->{total_connections};

unlike(http_head('/'), qr/SEE-THIS/, 'head request');
like(http_get('/'), qr/SEE-THIS/, 'get after head');

is($memd1->stats()->{total}->{total_connections}, $total + 1,
	'head request closes connection');

$total = $memd1->stats()->{total}->{total_connections};

unlike(http_head('/big'), qr/XXX/, 'big head');
like(http_get('/'), qr/SEE-THIS/, 'get after big head');

is($memd1->stats()->{total}->{total_connections}, $total + 1,
	'big head request closes connection');

# two backends with the maximum number of cached connections set to 1
# should establish a new connection on each request

$total = $memd1->stats()->{total}->{total_connections} +
	$memd2->stats()->{total}->{total_connections};

http_get('/memd3');
http_get('/memd3');
http_get('/memd3');

is($memd1->stats()->{total}->{total_connections} +
	$memd2->stats()->{total}->{total_connections}, $total + 3,
	'3 connections should be established');

# two backends with the maximum number of cached connections set to 10
# should establish only two connections (one per backend)

$total = $memd1->stats()->{total}->{total_connections} +
	$memd2->stats()->{total}->{total_connections};

http_get('/memd4');
http_get('/memd4');
http_get('/memd4');

is($memd1->stats()->{total}->{total_connections} +
	$memd2->stats()->{total}->{total_connections}, $total + 2,
	'connection per backend');

###############################################################################