view src/event/ngx_event_accept.c @ 9203:0de20f43db25
Fixed request termination with AIO and subrequests (ticket #2555).
When a request was terminated due to an error via ngx_http_terminate_request()
while an AIO operation was running in a subrequest, various issues were
observed. This happened because ngx_http_request_finalizer() was only set
in the subrequest where ngx_http_terminate_request() was called, but not
in the subrequest where the AIO operation was running. After completion
of the AIO operation normal processing of the subrequest was resumed, leading
to issues.
In particular, in case of the upstream module, termination of the request
called upstream cleanup, which closed the upstream connection. Attempts to
further work with the upstream connection after AIO operation completion
resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file
descriptor) while reading upstream" errors, or socket leaks.
In ticket #2555, issues were observed with the following configuration
with cache background update (with thread writing instrumented to
introduce a delay, when a client closes the connection during an update):
    location = /background-and-aio-write {
        proxy_pass ...
        proxy_cache one;
        proxy_cache_valid 200 1s;
        proxy_cache_background_update on;
        proxy_cache_use_stale updating;
        aio threads;
        aio_write on;
        limit_rate 1000;
    }
Similarly, the same issue can be seen with SSI, and can be caused by
errors in subrequests, such as in the following configuration
(where "/proxy" uses AIO, and "/sleep" returns 444 after some delay,
causing request termination):
    location = /ssi-active-boom {
        ssi on;
        ssi_types *;
        return 200 '
            <!--#include virtual="/proxy" -->
            <!--#include virtual="/sleep" -->
        ';
        limit_rate 1000;
    }
Or the same with both the AIO operation and the error in non-active
subrequests (a case which needs slightly different handling, see below):
    location = /ssi-non-active-boom {
        ssi on;
        ssi_types *;
        return 200 '
            <!--#include virtual="/static" -->
            <!--#include virtual="/proxy" -->
            <!--#include virtual="/sleep" -->
        ';
        limit_rate 1000;
    }
Similarly, issues can be observed with just static files. However,
with static files the potential impact is limited due to timeout safeguards
in ngx_http_writer(), and the fact that c->error is set during request
termination.
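For illustration, the c->error safeguard works roughly as follows:
ngx_http_write_filter() refuses to write anything once c->error is set, and
ngx_http_writer() finalizes the request when the output filter chain returns
an error. A simplified sketch of the two relevant pieces (not the verbatim
source):

    /* in ngx_http_write_filter(): refuse to write on a terminated connection */

    if (c->error) {
        return NGX_ERROR;
    }

    /*
     * in ngx_http_writer(): an error from the output filter chain
     * finalizes the request, which closes the connection
     */

    rc = ngx_http_output_filter(r, NULL);

    if (rc == NGX_ERROR) {
        ngx_http_finalize_request(r, rc);
        return;
    }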
In a simple configuration with an AIO operation in the active subrequest,
such as in the following configuration, the connection is closed right
after completion of the AIO operation anyway, since ngx_http_writer()
tries to write to the connection and fails due to c->error set:
    location = /ssi-active-static-boom {
        ssi on;
        ssi_types *;
        return 200 '
            <!--#include virtual="/static-aio" -->
            <!--#include virtual="/sleep" -->
        ';
        limit_rate 1000;
    }
In the following configuration, with an AIO operation in a non-active
subrequest, the connection is closed only after send_timeout expires:
    location = /ssi-non-active-static-boom {
        ssi on;
        ssi_types *;
        return 200 '
            <!--#include virtual="/static" -->
            <!--#include virtual="/static-aio" -->
            <!--#include virtual="/sleep" -->
        ';
        limit_rate 1000;
    }
Fix is to introduce r->main->terminated flag, which is to be checked
by AIO event handlers when the r->main->blocked counter is decremented.
When the flag is set, handlers are expected to wake up the connection
instead of the subrequest (which might be already cleaned up).
Additionally, now ngx_http_request_finalizer() is always set in the
active subrequest, so waking up the connection properly finalizes the
request even if termination happened in a non-active subrequest.
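The handler change itself lives in the HTTP AIO completion handlers rather
than in the file shown below. As an illustration of the pattern, a simplified
sketch modeled on the AIO event handler of ngx_http_copy_filter_module (a
sketch, not the verbatim patch) looks roughly like this:

    static void
    ngx_http_copy_aio_event_handler(ngx_event_t *ev)
    {
        ngx_event_aio_t     *aio;
        ngx_connection_t    *c;
        ngx_http_request_t  *r;

        aio = ev->data;
        r = aio->data;
        c = r->connection;

        ngx_http_set_log_request(c->log, r);

        r->main->blocked--;
        r->aio = 0;

        if (r->main->terminated) {

            /*
             * trigger connection event handler if the request was
             * terminated
             */

            c->write->handler(c->write);

        } else {
            r->write_event_handler(r);
            ngx_http_run_posted_requests(c);
        }
    }

Waking up c->write->handler instead of the subrequest means the request is
finalized through the connection, which works even if the subrequest that
owned the AIO operation has already been cleaned up.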
author      Maxim Dounin <mdounin@mdounin.ru>
date        Tue, 30 Jan 2024 03:20:05 +0300
parents     e2d07e4ec636
children
line source
/*
 * Copyright (C) Igor Sysoev
 * Copyright (C) Nginx, Inc.
 */


#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>


static ngx_int_t ngx_disable_accept_events(ngx_cycle_t *cycle, ngx_uint_t all);
#if (NGX_HAVE_EPOLLEXCLUSIVE)
static void ngx_reorder_accept_events(ngx_listening_t *ls);
#endif
static void ngx_close_accepted_connection(ngx_connection_t *c);


void
ngx_event_accept(ngx_event_t *ev)
{
    socklen_t          socklen;
    ngx_err_t          err;
    ngx_log_t         *log;
    ngx_uint_t         level;
    ngx_socket_t       s;
    ngx_event_t       *rev, *wev;
    ngx_sockaddr_t     sa;
    ngx_listening_t   *ls;
    ngx_connection_t  *c, *lc;
    ngx_event_conf_t  *ecf;
#if (NGX_HAVE_ACCEPT4)
    static ngx_uint_t  use_accept4 = 1;
#endif

    if (ev->timedout) {
        if (ngx_enable_accept_events((ngx_cycle_t *) ngx_cycle) != NGX_OK) {
            return;
        }

        ev->timedout = 0;
    }

    ecf = ngx_event_get_conf(ngx_cycle->conf_ctx, ngx_event_core_module);

    if (!(ngx_event_flags & NGX_USE_KQUEUE_EVENT)) {
        ev->available = ecf->multi_accept;
    }

    lc = ev->data;
    ls = lc->listening;
    ev->ready = 0;

    ngx_log_debug2(NGX_LOG_DEBUG_EVENT, ev->log, 0,
                   "accept on %V, ready: %d", &ls->addr_text, ev->available);

    do {
        socklen = sizeof(ngx_sockaddr_t);

#if (NGX_HAVE_ACCEPT4)
        if (use_accept4) {
            s = accept4(lc->fd, &sa.sockaddr, &socklen, SOCK_NONBLOCK);
        } else {
            s = accept(lc->fd, &sa.sockaddr, &socklen);
        }
#else
        s = accept(lc->fd, &sa.sockaddr, &socklen);
#endif

        if (s == (ngx_socket_t) -1) {
            err = ngx_socket_errno;

            if (err == NGX_EAGAIN) {
                ngx_log_debug0(NGX_LOG_DEBUG_EVENT, ev->log, err,
                               "accept() not ready");
                return;
            }

            level = NGX_LOG_ALERT;

            if (err == NGX_ECONNABORTED) {
                level = NGX_LOG_ERR;

            } else if (err == NGX_EMFILE || err == NGX_ENFILE) {
                level = NGX_LOG_CRIT;
            }

#if (NGX_HAVE_ACCEPT4)
            ngx_log_error(level, ev->log, err,
                          use_accept4 ? "accept4() failed" : "accept() failed");

            if (use_accept4 && err == NGX_ENOSYS) {
                use_accept4 = 0;
                ngx_inherited_nonblocking = 0;
                continue;
            }
#else
            ngx_log_error(level, ev->log, err, "accept() failed");
#endif

            if (err == NGX_ECONNABORTED) {
                if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
                    ev->available--;
                }

                if (ev->available) {
                    continue;
                }
            }

            if (err == NGX_EMFILE || err == NGX_ENFILE) {
                if (ngx_disable_accept_events((ngx_cycle_t *) ngx_cycle, 1)
                    != NGX_OK)
                {
                    return;
                }

                if (ngx_use_accept_mutex) {
                    if (ngx_accept_mutex_held) {
                        ngx_shmtx_unlock(&ngx_accept_mutex);
                        ngx_accept_mutex_held = 0;
                    }

                    ngx_accept_disabled = 1;

                } else {
                    ngx_add_timer(ev, ecf->accept_mutex_delay);
                }
            }

            return;
        }

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_accepted, 1);
#endif

        ngx_accept_disabled = ngx_cycle->connection_n / 8
                              - ngx_cycle->free_connection_n;

        c = ngx_get_connection(s, ev->log);

        if (c == NULL) {
            if (ngx_close_socket(s) == -1) {
                ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                              ngx_close_socket_n " failed");
            }

            return;
        }

        c->type = SOCK_STREAM;

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_active, 1);
#endif

        c->pool = ngx_create_pool(ls->pool_size, ev->log);
        if (c->pool == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        if (socklen > (socklen_t) sizeof(ngx_sockaddr_t)) {
            socklen = sizeof(ngx_sockaddr_t);
        }

        c->sockaddr = ngx_palloc(c->pool, socklen);
        if (c->sockaddr == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        ngx_memcpy(c->sockaddr, &sa, socklen);

        log = ngx_palloc(c->pool, sizeof(ngx_log_t));
        if (log == NULL) {
            ngx_close_accepted_connection(c);
            return;
        }

        /* set a blocking mode for iocp and non-blocking mode for others */

        if (ngx_inherited_nonblocking) {
            if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
                if (ngx_blocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_blocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }

        } else {
            if (!(ngx_event_flags & NGX_USE_IOCP_EVENT)) {
                if (ngx_nonblocking(s) == -1) {
                    ngx_log_error(NGX_LOG_ALERT, ev->log, ngx_socket_errno,
                                  ngx_nonblocking_n " failed");
                    ngx_close_accepted_connection(c);
                    return;
                }
            }
        }

        *log = ls->log;

        c->recv = ngx_recv;
        c->send = ngx_send;
        c->recv_chain = ngx_recv_chain;
        c->send_chain = ngx_send_chain;

        c->log = log;
        c->pool->log = log;

        c->socklen = socklen;
        c->listening = ls;
        c->local_sockaddr = ls->sockaddr;
        c->local_socklen = ls->socklen;

#if (NGX_HAVE_UNIX_DOMAIN)
        if (c->sockaddr->sa_family == AF_UNIX) {
            c->tcp_nopush = NGX_TCP_NOPUSH_DISABLED;
            c->tcp_nodelay = NGX_TCP_NODELAY_DISABLED;

#if (NGX_SOLARIS)
            /* Solaris's sendfilev() supports AF_NCA, AF_INET, and AF_INET6 */
            c->sendfile = 0;
#endif
        }
#endif

        rev = c->read;
        wev = c->write;

        wev->ready = 1;

        if (ngx_event_flags & NGX_USE_IOCP_EVENT) {
            rev->ready = 1;
        }

        if (ev->deferred_accept) {
            rev->ready = 1;
#if (NGX_HAVE_KQUEUE || NGX_HAVE_EPOLLRDHUP)
            rev->available = 1;
#endif
        }

        rev->log = log;
        wev->log = log;

        /*
         * TODO: MT: - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         *
         * TODO: MP: - allocated in a shared memory
         *           - ngx_atomic_fetch_add()
         *             or protection by critical section or light mutex
         */

        c->number = ngx_atomic_fetch_add(ngx_connection_counter, 1);

        c->start_time = ngx_current_msec;

#if (NGX_STAT_STUB)
        (void) ngx_atomic_fetch_add(ngx_stat_handled, 1);
#endif

        if (ls->addr_ntop) {
            c->addr_text.data = ngx_pnalloc(c->pool, ls->addr_text_max_len);
            if (c->addr_text.data == NULL) {
                ngx_close_accepted_connection(c);
                return;
            }

            c->addr_text.len = ngx_sock_ntop(c->sockaddr, c->socklen,
                                             c->addr_text.data,
                                             ls->addr_text_max_len, 0);
            if (c->addr_text.len == 0) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

#if (NGX_DEBUG)
        {
        ngx_str_t  addr;
        u_char     text[NGX_SOCKADDR_STRLEN];

        ngx_debug_accepted_connection(ecf, c);

        if (log->log_level & NGX_LOG_DEBUG_EVENT) {
            addr.data = text;
            addr.len = ngx_sock_ntop(c->sockaddr, c->socklen, text,
                                     NGX_SOCKADDR_STRLEN, 1);

            ngx_log_debug3(NGX_LOG_DEBUG_EVENT, log, 0,
                           "*%uA accept: %V fd:%d", c->number, &addr, s);
        }

        }
#endif

        if (ngx_add_conn && (ngx_event_flags & NGX_USE_EPOLL_EVENT) == 0) {
            if (ngx_add_conn(c) == NGX_ERROR) {
                ngx_close_accepted_connection(c);
                return;
            }
        }

        log->data = NULL;
        log->handler = NULL;

        ls->handler(c);

        if (ngx_event_flags & NGX_USE_KQUEUE_EVENT) {
            ev->available--;
        }

    } while (ev->available);

#if (NGX_HAVE_EPOLLEXCLUSIVE)
    ngx_reorder_accept_events(ls);
#endif
}


ngx_int_t
ngx_trylock_accept_mutex(ngx_cycle_t *cycle)
{
    if (ngx_shmtx_trylock(&ngx_accept_mutex)) {

        ngx_log_debug0(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                       "accept mutex locked");

        if (ngx_accept_mutex_held && ngx_accept_events == 0) {
            return NGX_OK;
        }

        if (ngx_enable_accept_events(cycle) == NGX_ERROR) {
            ngx_shmtx_unlock(&ngx_accept_mutex);
            return NGX_ERROR;
        }

        ngx_accept_events = 0;
        ngx_accept_mutex_held = 1;

        return NGX_OK;
    }

    ngx_log_debug1(NGX_LOG_DEBUG_EVENT, cycle->log, 0,
                   "accept mutex lock failed: %ui", ngx_accept_mutex_held);

    if (ngx_accept_mutex_held) {
        if (ngx_disable_accept_events(cycle, 0) == NGX_ERROR) {
            return NGX_ERROR;
        }

        ngx_accept_mutex_held = 0;
    }

    return NGX_OK;
}


ngx_int_t
ngx_enable_accept_events(ngx_cycle_t *cycle)
{
    ngx_uint_t         i;
    ngx_listening_t   *ls;
    ngx_connection_t  *c;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ls[i].connection;

        if (c == NULL || c->read->active) {
            continue;
        }

        if (ngx_add_event(c->read, NGX_READ_EVENT, 0) == NGX_ERROR) {
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}


static ngx_int_t
ngx_disable_accept_events(ngx_cycle_t *cycle, ngx_uint_t all)
{
    ngx_uint_t         i;
    ngx_listening_t   *ls;
    ngx_connection_t  *c;

    ls = cycle->listening.elts;
    for (i = 0; i < cycle->listening.nelts; i++) {

        c = ls[i].connection;

        if (c == NULL || !c->read->active) {
            continue;
        }

#if (NGX_HAVE_REUSEPORT)

        /*
         * do not disable accept on worker's own sockets
         * when disabling accept events due to accept mutex
         */

        if (ls[i].reuseport && !all) {
            continue;
        }

#endif

        if (ngx_del_event(c->read, NGX_READ_EVENT, NGX_DISABLE_EVENT)
            == NGX_ERROR)
        {
            return NGX_ERROR;
        }
    }

    return NGX_OK;
}


#if (NGX_HAVE_EPOLLEXCLUSIVE)

static void
ngx_reorder_accept_events(ngx_listening_t *ls)
{
    ngx_connection_t  *c;

    /*
     * Linux with EPOLLEXCLUSIVE usually notifies only the process which
     * was first to add the listening socket to the epoll instance.  As
     * a result most of the connections are handled by the first worker
     * process.  To fix this, we re-add the socket periodically, so other
     * workers will get a chance to accept connections.
     */

    if (!ngx_use_exclusive_accept) {
        return;
    }

#if (NGX_HAVE_REUSEPORT)

    if (ls->reuseport) {
        return;
    }

#endif

    c = ls->connection;

    if (c->requests++ % 16 != 0
        && ngx_accept_disabled <= 0)
    {
        return;
    }

    if (ngx_del_event(c->read, NGX_READ_EVENT, NGX_DISABLE_EVENT)
        == NGX_ERROR)
    {
        return;
    }

    if (ngx_add_event(c->read, NGX_READ_EVENT, NGX_EXCLUSIVE_EVENT)
        == NGX_ERROR)
    {
        return;
    }
}

#endif


static void
ngx_close_accepted_connection(ngx_connection_t *c)
{
    ngx_socket_t  fd;

    ngx_free_connection(c);

    fd = c->fd;
    c->fd = (ngx_socket_t) -1;

    if (ngx_close_socket(fd) == -1) {
        ngx_log_error(NGX_LOG_ALERT, c->log, ngx_socket_errno,
                      ngx_close_socket_n " failed");
    }

    if (c->pool) {
        ngx_destroy_pool(c->pool);
    }

#if (NGX_STAT_STUB)
    (void) ngx_atomic_fetch_add(ngx_stat_active, -1);
#endif
}


u_char *
ngx_accept_log_error(ngx_log_t *log, u_char *buf, size_t len)
{
    return ngx_snprintf(buf, len, " while accepting new connection on %V",
                        log->data);
}


#if (NGX_DEBUG)

void
ngx_debug_accepted_connection(ngx_event_conf_t *ecf, ngx_connection_t *c)
{
    struct sockaddr_in   *sin;
    ngx_cidr_t           *cidr;
    ngx_uint_t            i;
#if (NGX_HAVE_INET6)
    struct sockaddr_in6  *sin6;
    ngx_uint_t            n;
#endif

    cidr = ecf->debug_connection.elts;
    for (i = 0; i < ecf->debug_connection.nelts; i++) {
        if (cidr[i].family != (ngx_uint_t) c->sockaddr->sa_family) {
            goto next;
        }

        switch (cidr[i].family) {

#if (NGX_HAVE_INET6)
        case AF_INET6:
            sin6 = (struct sockaddr_in6 *) c->sockaddr;
            for (n = 0; n < 16; n++) {
                if ((sin6->sin6_addr.s6_addr[n]
                    & cidr[i].u.in6.mask.s6_addr[n])
                    != cidr[i].u.in6.addr.s6_addr[n])
                {
                    goto next;
                }
            }
            break;
#endif

#if (NGX_HAVE_UNIX_DOMAIN)
        case AF_UNIX:
            break;
#endif

        default: /* AF_INET */
            sin = (struct sockaddr_in *) c->sockaddr;
            if ((sin->sin_addr.s_addr & cidr[i].u.in.mask)
                != cidr[i].u.in.addr)
            {
                goto next;
            }
            break;
        }

        c->log->log_level = NGX_LOG_DEBUG_CONNECTION|NGX_LOG_DEBUG_ALL;
        break;

    next:
        continue;
    }
}

#endif