Mercurial > hg > ngx_http_upstream_keepalive
changeset 11:15530a464dba
Keepalive: don't cache invalid connections.
1. Remember failed status, since peer.free() may be called more than once.
It's called twice if peer fails - once from ngx_http_upstream_next() with
NGX_PEER_FAILED set, and then from ngx_http_upstream_finalize_request()
without.
2. We shouldn't cache a connection while we are still expecting something from
upstream. For memcached this means either a 404 response or a 200 response with
the whole body read (the body may not be read e.g. when serving a HEAD request).
author | Maxim Dounin <mdounin@mdounin.ru> |
---|---|
date | Thu, 13 Nov 2008 19:36:23 +0300 |
parents | 06bd0e50e696 |
children | 067ddc059ee0 |
files | ngx_http_upstream_keepalive_module.c t/memcached-keepalive.t |
diffstat | 2 files changed, 69 insertions(+), 5 deletions(-) [+] |
line wrap: on
line diff
--- a/ngx_http_upstream_keepalive_module.c +++ b/ngx_http_upstream_keepalive_module.c @@ -25,11 +25,15 @@ typedef struct { typedef struct { ngx_http_upstream_keepalive_srv_conf_t *conf; + ngx_http_upstream_t *upstream; + void *data; ngx_event_get_peer_pt original_get_peer; ngx_event_free_peer_pt original_free_peer; + ngx_uint_t failed; /* unsigned:1 */ + } ngx_http_upstream_keepalive_peer_data_t; @@ -169,6 +173,7 @@ ngx_http_upstream_init_keepalive_peer(ng } kp->conf = kcf; + kp->upstream = r->upstream; kp->data = r->upstream->peer.data; kp->original_get_peer = r->upstream->peer.get; kp->original_free_peer = r->upstream->peer.free; @@ -194,6 +199,8 @@ ngx_http_upstream_get_keepalive_peer(ngx ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0, "get keepalive peer"); + kp->failed = 0; + /* single pool of cached connections */ if (kp->conf->single && !ngx_queue_empty(&kp->conf->cache)) { @@ -264,14 +271,50 @@ ngx_http_upstream_free_keepalive_peer(ng ngx_http_upstream_keepalive_peer_data_t *kp = data; ngx_http_upstream_keepalive_cache_t *item; - ngx_queue_t *q; - ngx_connection_t *c; + ngx_uint_t status; + ngx_queue_t *q; + ngx_connection_t *c; + ngx_http_upstream_t *u; ngx_log_debug0(NGX_LOG_DEBUG_HTTP, pc->log, 0, "free keepalive peer"); - if (!(state & NGX_PEER_FAILED) - && pc->connection != NULL) + /* remember failed state - peer.free() may be called more than once */ + + if (state & NGX_PEER_FAILED) { + kp->failed = 1; + } + + /* + * cache valid connections + * + * For memcached this means status either 404 or 200. For status 200 we + * should also check if all response body was read (u->length == 0) and + * make sure that u->length is valid (we use u->header_sent flag to test + * this). Memcached is the only supported protocol for now. 
+ * + * Some notes on other possibilities (incomplete): + * + * fastcgi: u->pipe->upstream_done should be sufficient + * + * proxy buffered: u->pipe->upstream_done, 304 replies, replies to head + * requests (see RFC 2616, 4.4 Message Length) + * + * proxy unbuffered: 200 as for memcached (with u->length == 0 and + * header_sent), 304, replies to head requests + * + * subrequest_in_memory: won't work as of now + * + * TODO: move this logic to protocol modules (NGX_PEER_KEEPALIVE?) + */ + + u = kp->upstream; + status = u->headers_in.status_n; + + if (!kp->failed + && pc->connection != NULL + && (status == NGX_HTTP_NOT_FOUND + || (status == NGX_HTTP_OK && u->header_sent && u->length == 0))) { c = pc->connection;
--- a/t/memcached-keepalive.t +++ b/t/memcached-keepalive.t @@ -20,7 +20,7 @@ select STDOUT; $| = 1; eval { require Cache::Memcached; }; plan(skip_all => 'Cache::Memcached not installed') if $@; -my $t = Test::Nginx->new()->has('rewrite')->has_daemon('memcached')->plan(10) +my $t = Test::Nginx->new()->has('rewrite')->has_daemon('memcached')->plan(16) ->write_file_expand('nginx.conf', <<'EOF'); master_process off; @@ -105,6 +105,7 @@ my $memd2 = Cache::Memcached->new(server $memd1->set('/', 'SEE-THIS'); $memd2->set('/', 'SEE-THIS'); +$memd1->set('/big', 'X' x 1000000); my $total = $memd1->stats()->{total}->{total_connections}; @@ -119,6 +120,26 @@ like(http_get('/'), qr/SEE-THIS/, 'keepa is($memd1->stats()->{total}->{total_connections}, $total + 1, 'only one connection used'); +# Since nginx doesn't read all data from connection in some situations (head +# requests, post_action, errors writing to client) we have to close such +# connections. Check if we really do close them. + +$total = $memd1->stats()->{total}->{total_connections}; + +unlike(http_head('/'), qr/SEE-THIS/, 'head request'); +like(http_get('/'), qr/SEE-THIS/, 'get after head'); + +is($memd1->stats()->{total}->{total_connections}, $total + 1, + 'head request closes connection'); + +$total = $memd1->stats()->{total}->{total_connections}; + +unlike(http_head('/big'), qr/XXX/, 'big head'); +like(http_get('/'), qr/SEE-THIS/, 'get after big head'); + +is($memd1->stats()->{total}->{total_connections}, $total + 1, + 'big head request closes connection'); + # two backends with 'single' option - should establish only one connection $total = $memd1->stats()->{total}->{total_connections} +