changeset 21:9a4ee6fe1c6d
Keepalive: avoid closing upstream connection on stale events.
Under some conditions stale events may occur, i.e. the read event handler may
be called for a just-saved upstream connection even though no data is
available for reading. We shouldn't close the upstream connection in such a
situation.
Reported by: Martin Fjordvald
| author | Maxim Dounin <mdounin@mdounin.ru> |
| --- | --- |
| date | Tue, 14 Sep 2010 04:01:14 +0400 |
| parents | 8d8eaaf07663 |
| children | f3e74fbd0f0b |
| files | ngx_http_upstream_keepalive_module.c t/stale.t |
| diffstat | 2 files changed, 127 insertions(+), 0 deletions(-) |
```diff
--- a/ngx_http_upstream_keepalive_module.c
+++ b/ngx_http_upstream_keepalive_module.c
@@ -386,12 +386,29 @@ ngx_http_upstream_keepalive_close_handle
     ngx_http_upstream_keepalive_srv_conf_t  *conf;
     ngx_http_upstream_keepalive_cache_t     *item;
 
+    int                n;
+    char               buf[1];
     ngx_connection_t  *c;
 
     ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ev->log, 0,
                    "keepalive close handler");
 
     c = ev->data;
+
+    n = recv(c->fd, buf, 1, MSG_PEEK);
+
+    if (n == -1 && ngx_socket_errno == NGX_EAGAIN) {
+        /* stale event */
+
+        if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
+            goto close;
+        }
+
+        return;
+    }
+
+close:
+
     item = c->data;
     conf = item->conf;
```
```diff
new file mode 100644
--- /dev/null
+++ b/t/stale.t
@@ -0,0 +1,110 @@
+#!/usr/bin/perl
+
+# (C) Maxim Dounin
+
+# Test for stale events handling in upstream keepalive.
+
+###############################################################################
+
+use warnings;
+use strict;
+
+use Test::More;
+use Test::Nginx;
+
+###############################################################################
+
+select STDERR; $| = 1;
+select STDOUT; $| = 1;
+
+eval { require Cache::Memcached; };
+plan(skip_all => 'Cache::Memcached not installed') if $@;
+
+my $t = Test::Nginx->new()->has('rewrite')->has_daemon('memcached')->plan(1)
+	->write_file_expand('nginx.conf', <<'EOF');
+
+%%TEST_GLOBALS%%
+
+daemon off;
+
+worker_processes 2;
+
+events {
+}
+
+http {
+    %%TEST_GLOBALS_HTTP%%
+
+    upstream memd {
+        server 127.0.0.1:8081;
+        keepalive 1;
+    }
+
+    server {
+        listen 127.0.0.1:8080 sndbuf=32k;
+        server_name localhost;
+
+        location / {
+            set $memcached_key $uri;
+            memcached_pass memd;
+        }
+    }
+}
+
+EOF
+
+my $memhelp = `memcached -h`;
+my @memopts1 = ();
+
+if ($memhelp =~ /repcached/) {
+	# repcached patches adds additional listen socket memcached
+	# that should be different too
+
+	push @memopts1, '-X', '8091';
+}
+if ($memhelp =~ /-U/) {
+	# UDP ports no longer off by default in memcached 1.2.7+
+
+	push @memopts1, '-U', '0';
+}
+
+$t->run_daemon('memcached', '-l', '127.0.0.1', '-p', '8081', @memopts1);
+
+$t->run();
+
+$t->waitforsocket('127.0.0.1:8081')
+	or die "Unable to start memcached";
+
+###############################################################################
+
+my $memd1 = Cache::Memcached->new(servers => [ '127.0.0.1:8081' ]);
+
+# It's possible that stale events occur, i.e. read event handler called
+# for just saved upstream connection without any data available for
+# read. We shouldn't close upstream connection in such situation.
+#
+# This happens due to reading from upstream connection on downstream write
+# events. More likely to happen with multiple workers due to use of posted
+# events.
+#
+# Stale event may only happen if reading response from upstream requires
+# entering event loop, i.e. response should be big enough. On the other
+# hand, it is less likely to occur with full client's connection output
+# buffer.
+#
+# We use here 2 workers, 20k response and set output buffer on clients
+# connection to 32k. This allows more or less reliably reproduce stale
+# events at least on FreeBSD testbed here.
+
+$memd1->set('/big', 'X' x 20480);
+
+my $total = $memd1->stats()->{total}->{total_connections};
+
+for (1 .. 100) {
+	http_get('/big');
+}
+
+cmp_ok($memd1->stats()->{total}->{total_connections}, '<=', $total + 2,
+	'only one connection per worker used');
+
+###############################################################################
```
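For readers outside the nginx codebase, the C change above hinges on standard POSIX socket semantics: a `recv()` with `MSG_PEEK` on a non-blocking socket returns -1 with `EAGAIN` when nothing is pending (a stale event), 0 when the peer has closed the connection, and a positive count when data has actually arrived. Below is a minimal standalone sketch of the same idiom against plain POSIX sockets; the helper name `event_is_stale` is hypothetical, and plain `errno` stands in for `ngx_socket_errno`:

```c
#include <errno.h>
#include <sys/types.h>
#include <sys/socket.h>

/*
 * Hypothetical helper illustrating the stale-event check from the
 * patch above, written against plain POSIX sockets rather than nginx
 * internals.  Assumes fd is a non-blocking socket belonging to an
 * idle cached connection that should have no data pending.
 *
 * Returns 1 if the read event was stale (connection still usable),
 * 0 if the connection must be closed.
 */
static int
event_is_stale(int fd)
{
    char     buf[1];
    ssize_t  n;

    /* peek one byte without consuming it */
    n = recv(fd, buf, 1, MSG_PEEK);

    if (n == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
        /* stale event: no data available, keep the connection cached */
        return 1;
    }

    /*
     * n == 0 means the peer closed the connection; n > 0 means
     * unexpected data on an idle connection; any other error means
     * the socket is broken.  In all cases, close the connection.
     */
    return 0;
}
```

In the module itself the `EAGAIN` branch additionally re-arms the read event via `ngx_handle_read_event()`, which keeps the idle connection monitored across the different event methods nginx supports; any result other than `EAGAIN` falls through to the existing close path.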