Mercurial > hg > nginx
view src/http/ngx_http_cache.h @ 9203:0de20f43db25
Fixed request termination with AIO and subrequests (ticket #2555).
When a request was terminated due to an error via ngx_http_terminate_request()
while an AIO operation was running in a subrequest, various issues were
observed. This happened because ngx_http_request_finalizer() was only set
in the subrequest where ngx_http_terminate_request() was called, but not
in the subrequest where the AIO operation was running. After completion
of the AIO operation normal processing of the subrequest was resumed, leading
to issues.
In particular, in case of the upstream module, termination of the request
called upstream cleanup, which closed the upstream connection. Attempts to
further work with the upstream connection after AIO operation completion
resulted in segfaults in ngx_ssl_recv(), "readv() failed (9: Bad file
descriptor) while reading upstream" errors, or socket leaks.
In ticket #2555, issues were observed with the following configuration
with cache background update (with thread writing instrumented to
introduce a delay, when a client closes the connection during an update):
location = /background-and-aio-write {
proxy_pass ...
proxy_cache one;
proxy_cache_valid 200 1s;
proxy_cache_background_update on;
proxy_cache_use_stale updating;
aio threads;
aio_write on;
limit_rate 1000;
}
Similarly, the same issue can be seen with SSI, and can be caused by
errors in subrequests, such as in the following configuration
(where "/proxy" uses AIO, and "/sleep" returns 444 after some delay,
causing request termination):
location = /ssi-active-boom {
ssi on;
ssi_types *;
return 200 '
<!--#include virtual="/proxy" -->
<!--#include virtual="/sleep" -->
';
limit_rate 1000;
}
Or the same with both the AIO operation and an error in non-active subrequests
(which needs slightly different handling, see below):
location = /ssi-non-active-boom {
ssi on;
ssi_types *;
return 200 '
<!--#include virtual="/static" -->
<!--#include virtual="/proxy" -->
<!--#include virtual="/sleep" -->
';
limit_rate 1000;
}
Similarly, issues can be observed with just static files. However,
with static files potential impact is limited due to timeout safeguards
in ngx_http_writer(), and the fact that c->error is set during request
termination.
In a simple configuration with an AIO operation in the active subrequest,
such as in the following configuration, the connection is closed right
after completion of the AIO operation anyway, since ngx_http_writer()
tries to write to the connection and fails due to c->error set:
location = /ssi-active-static-boom {
ssi on;
ssi_types *;
return 200 '
<!--#include virtual="/static-aio" -->
<!--#include virtual="/sleep" -->
';
limit_rate 1000;
}
In the following configuration, with an AIO operation in a non-active
subrequest, the connection is closed only after send_timeout expires:
location = /ssi-non-active-static-boom {
ssi on;
ssi_types *;
return 200 '
<!--#include virtual="/static" -->
<!--#include virtual="/static-aio" -->
<!--#include virtual="/sleep" -->
';
limit_rate 1000;
}
Fix is to introduce r->main->terminated flag, which is to be checked
by AIO event handlers when the r->main->blocked counter is decremented.
When the flag is set, handlers are expected to wake up the connection
instead of the subrequest (which might be already cleaned up).
Additionally, now ngx_http_request_finalizer() is always set in the
active subrequest, so waking up the connection properly finalizes the
request even if termination happened in a non-active subrequest.
author | Maxim Dounin <mdounin@mdounin.ru> |
---|---|
date | Tue, 30 Jan 2024 03:20:05 +0300 |
parents | 3781de64e747 |
children |
line wrap: on
line source
/* * Copyright (C) Igor Sysoev * Copyright (C) Nginx, Inc. */ #ifndef _NGX_HTTP_CACHE_H_INCLUDED_ #define _NGX_HTTP_CACHE_H_INCLUDED_ #include <ngx_config.h> #include <ngx_core.h> #include <ngx_http.h> #define NGX_HTTP_CACHE_MISS 1 #define NGX_HTTP_CACHE_BYPASS 2 #define NGX_HTTP_CACHE_EXPIRED 3 #define NGX_HTTP_CACHE_STALE 4 #define NGX_HTTP_CACHE_UPDATING 5 #define NGX_HTTP_CACHE_REVALIDATED 6 #define NGX_HTTP_CACHE_HIT 7 #define NGX_HTTP_CACHE_SCARCE 8 #define NGX_HTTP_CACHE_KEY_LEN 16 #define NGX_HTTP_CACHE_ETAG_LEN 128 #define NGX_HTTP_CACHE_VARY_LEN 128 #define NGX_HTTP_CACHE_VERSION 5 typedef struct { ngx_uint_t status; time_t valid; } ngx_http_cache_valid_t; typedef struct { ngx_rbtree_node_t node; ngx_queue_t queue; u_char key[NGX_HTTP_CACHE_KEY_LEN - sizeof(ngx_rbtree_key_t)]; unsigned count:20; unsigned uses:10; unsigned valid_msec:10; unsigned error:10; unsigned exists:1; unsigned updating:1; unsigned deleting:1; unsigned purged:1; /* 10 unused bits */ ngx_file_uniq_t uniq; time_t expire; time_t valid_sec; size_t body_start; off_t fs_size; ngx_msec_t lock_time; } ngx_http_file_cache_node_t; struct ngx_http_cache_s { ngx_file_t file; ngx_array_t keys; uint32_t crc32; u_char key[NGX_HTTP_CACHE_KEY_LEN]; u_char main[NGX_HTTP_CACHE_KEY_LEN]; ngx_file_uniq_t uniq; time_t valid_sec; time_t updating_sec; time_t error_sec; time_t last_modified; time_t date; ngx_str_t etag; ngx_str_t vary; u_char variant[NGX_HTTP_CACHE_KEY_LEN]; size_t buffer_size; size_t header_start; size_t body_start; off_t length; off_t fs_size; ngx_uint_t min_uses; ngx_uint_t error; ngx_uint_t valid_msec; ngx_uint_t vary_tag; ngx_buf_t *buf; ngx_http_file_cache_t *file_cache; ngx_http_file_cache_node_t *node; #if (NGX_THREADS || NGX_COMPAT) ngx_thread_task_t *thread_task; #endif ngx_msec_t lock_timeout; ngx_msec_t lock_age; ngx_msec_t lock_time; ngx_msec_t wait_time; ngx_event_t wait_event; unsigned lock:1; unsigned waiting:1; unsigned updated:1; unsigned updating:1; unsigned exists:1; 
unsigned temp_file:1; unsigned purged:1; unsigned reading:1; unsigned secondary:1; unsigned update_variant:1; unsigned background:1; unsigned stale_updating:1; unsigned stale_error:1; }; typedef struct { ngx_uint_t version; time_t valid_sec; time_t updating_sec; time_t error_sec; time_t last_modified; time_t date; uint32_t crc32; u_short valid_msec; u_short header_start; u_short body_start; u_char etag_len; u_char etag[NGX_HTTP_CACHE_ETAG_LEN]; u_char vary_len; u_char vary[NGX_HTTP_CACHE_VARY_LEN]; u_char variant[NGX_HTTP_CACHE_KEY_LEN]; } ngx_http_file_cache_header_t; typedef struct { ngx_rbtree_t rbtree; ngx_rbtree_node_t sentinel; ngx_queue_t queue; ngx_atomic_t cold; ngx_atomic_t loading; off_t size; ngx_uint_t count; ngx_uint_t watermark; } ngx_http_file_cache_sh_t; struct ngx_http_file_cache_s { ngx_http_file_cache_sh_t *sh; ngx_slab_pool_t *shpool; ngx_path_t *path; off_t min_free; off_t max_size; size_t bsize; time_t inactive; time_t fail_time; ngx_uint_t files; ngx_uint_t loader_files; ngx_msec_t last; ngx_msec_t loader_sleep; ngx_msec_t loader_threshold; ngx_uint_t manager_files; ngx_msec_t manager_sleep; ngx_msec_t manager_threshold; ngx_shm_zone_t *shm_zone; ngx_uint_t use_temp_path; /* unsigned use_temp_path:1 */ }; ngx_int_t ngx_http_file_cache_new(ngx_http_request_t *r); ngx_int_t ngx_http_file_cache_create(ngx_http_request_t *r); void ngx_http_file_cache_create_key(ngx_http_request_t *r); ngx_int_t ngx_http_file_cache_open(ngx_http_request_t *r); ngx_int_t ngx_http_file_cache_set_header(ngx_http_request_t *r, u_char *buf); void ngx_http_file_cache_update(ngx_http_request_t *r, ngx_temp_file_t *tf); void ngx_http_file_cache_update_header(ngx_http_request_t *r); ngx_int_t ngx_http_cache_send(ngx_http_request_t *); void ngx_http_file_cache_free(ngx_http_cache_t *c, ngx_temp_file_t *tf); time_t ngx_http_file_cache_valid(ngx_array_t *cache_valid, ngx_uint_t status); char *ngx_http_file_cache_set_slot(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); char 
*ngx_http_file_cache_valid_set_slot(ngx_conf_t *cf, ngx_command_t *cmd, void *conf); extern ngx_str_t ngx_http_cache_status[]; #endif /* _NGX_HTTP_CACHE_H_INCLUDED_ */