view limit_req.t @ 1961:fe6f22da53ec default tip

Tests: tests for usage of discarded body. The client_max_body_size limit should be ignored when the request body is already discarded. In HTTP/1.x, this is done by checking the r->discard_body flag when the body is being discarded, and because r->headers_in.content_length_n is 0 when it's already discarded. This, however, does not happen with HTTP/2 and HTTP/3, and therefore "error_page 413" does not work without relaxing the limit. Further, with proxy_pass, r->headers_in.content_length_n is used to determine the length of the request body, and therefore is not correct if discarding of the request body isn't yet complete. While discarding the request body, r->headers_in.content_length_n contains the rest of the body to discard (or, in case of chunked request body, the rest of the current chunk to discard). Similarly, the $content_length variable uses r->headers_in.content_length if available, and is also incorrect. The $content_length variable is used when proxying with fastcgi_pass, grpc_pass, and uwsgi_pass (scgi_pass uses the value calculated based on the actual request body buffers, and therefore works correctly).
author Maxim Dounin <mdounin@mdounin.ru>
date Sat, 27 Apr 2024 18:55:50 +0300
parents 62e2baa3bc60
children
line wrap: on
line source

#!/usr/bin/perl

# (C) Maxim Dounin

# Tests for nginx limit_req module.

###############################################################################

use warnings;
use strict;

use Test::More;

# Change to the script's own directory at compile time so the relative
# paths below ("lib", generated config and fixture files) resolve
# regardless of the directory the test harness was started from.
BEGIN { use FindBin; chdir($FindBin::Bin); }

use lib 'lib';
use Test::Nginx;

###############################################################################

# Unbuffer STDERR and STDOUT so TAP output and diagnostics interleave
# in the right order when captured by the harness.
select STDERR; $| = 1;
select STDOUT; $| = 1;

# Skip the whole script unless nginx was built with the http and
# limit_req modules; exactly 6 assertions are planned below.
my $t = Test::Nginx->new()->has(qw/http limit_req/)->plan(6);

# Test configuration: three shared-memory zones keyed on the client
# address.  Zones "one" and "long" use a slow rate (2r/s) so requests
# get rejected ("one", burst=1 nodelay) or delayed ("long", burst=5);
# zone "fast" (1000r/s) is used for the negative-excess check.
# %%...%% placeholders are expanded by write_file_expand().
$t->write_file_expand('nginx.conf', <<'EOF');

%%TEST_GLOBALS%%

daemon off;

events {
}

http {
    %%TEST_GLOBALS_HTTP%%

    limit_req_zone  $binary_remote_addr  zone=one:1m   rate=2r/s;
    limit_req_zone  $binary_remote_addr  zone=long:1m  rate=2r/s;
    limit_req_zone  $binary_remote_addr  zone=fast:1m  rate=1000r/s;

    server {
        listen       127.0.0.1:8080;
        server_name  localhost;
        location / {
            limit_req    zone=one  burst=1  nodelay;
        }
        location /status {
            limit_req    zone=one  burst=1  nodelay;

            limit_req_status  501;
        }
        location /long {
            limit_req    zone=long  burst=5;
        }
        location /fast {
            limit_req    zone=fast  burst=1;
        }
    }
}

EOF

# Fixture files: small bodies for the basic tests, and a large body
# (11 bytes x 65536 = 704k) so a delayed response spans many buffers.
$t->write_file('test1.html', 'XtestX');
$t->write_file('long.html', "1234567890\n" x (1 << 16));
$t->write_file('fast.html', 'XtestX');
$t->run();

###############################################################################

# Zone "one" (2r/s, burst=1, nodelay): the first request must pass;
# after a second (unchecked) request the allowance is exhausted, so the
# third request is rejected with the default 503 status.
like(http_get('/test1.html'), qr/^HTTP\/1.. 200 /m, 'request');
http_get('/test1.html');
like(http_get('/test1.html'), qr/^HTTP\/1.. 503 /m, 'request rejected');
# /status.html falls under "location /status", where limit_req_status
# overrides the rejection status from 503 to 501.
like(http_get('/status.html'), qr/^HTTP\/1.. 501 /m, 'request rejected status');
# Two more (rejected, unchecked) requests keep zone "one" saturated for
# the "rejects not counted" check further below.
http_get('/test1.html');
http_get('/test1.html');

# Second request will be delayed by limit_req (zone "long" has burst=5
# without "nodelay"), make sure it isn't truncated.  The truncation bug
# only manifests itself if the buffer will be filled, so sleep for a
# while before reading the response of the delayed request.  The two
# responses must have identical length.

my $undelayed_len = length(http_get('/long.html'));
my $delayed_len = length(http_get('/long.html', sleep => 0.6));
is($delayed_len, $undelayed_len, 'delayed big request not truncated');

# make sure rejected requests are not counted, and access is again allowed
# after 1/rate seconds; the big-file requests above take long enough for
# the zone "one" allowance to recover — unless rejected requests were
# (incorrectly) counted as excess, in which case this would still be 503

like(http_get('/test1.html'), qr/^HTTP\/1.. 200 /m, 'rejects not counted');

# make sure negative excess values are handled properly

# prime zone "fast" (1000r/s), then wait 0.1s — far more than 1/rate —
# so the computed excess for the next request goes negative
http_get('/fast.html');
# four-argument select with undef handles: portable sub-second sleep
select undef, undef, undef, 0.1;
like(http_get('/fast.html'), qr/^HTTP\/1.. 200 /m, 'negative excess');

###############################################################################