view limit_req.t @ 1938:e1059682aeef

Tests: fixed ClientHello handling when resending Initial QUIC packets. Previously, the ClientHello was rebuilt on each send with a distinct ClientHello.random, resulting in a different CRYPTO payload, which led to a mismatch of the TLS transcript hash and the derived secrets when an Initial packet was resent. Now the ClientHello is built once and reused for resent Initial packets. Additionally, this required preserving the generated secret value used in the shared secret calculation as part of the TLS key schedule. Previously it was regenerated when receiving a Retry packet, but that would not work with a reused ClientHello, as the resulting shared secrets would not match.
author Sergey Kandaurov <pluknet@nginx.com>
date Wed, 30 Aug 2023 02:22:58 +0400
parents 62e2baa3bc60
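
A minimal sketch of the caching pattern described above, assuming hypothetical helper names (generate_key_share and build_client_hello stand in for the real HTTP/3 test library internals): the ephemeral secret and the ClientHello are created once and cached, so every resent Initial packet, including one sent in response to a Retry, carries the same CRYPTO payload and the key schedule stays consistent.

use strict;
use warnings;

my ($secret, $client_hello);

# Hypothetical stand-ins for the real key share and ClientHello builders.
sub generate_key_share { join('', map { chr(int(rand(256))) } 1 .. 32) }
sub build_client_hello { 'ClientHello:' . unpack('H*', $_[0]) }

sub initial_crypto_payload {
	# Build once and reuse on every resend; regenerating either value here
	# would change the CRYPTO payload and break the derived secrets.
	$secret //= generate_key_share();
	$client_hello //= build_client_hello($secret);
	return $client_hello;
}

# Both the initial send and any resend get byte-identical CRYPTO data.
my $first = initial_crypto_payload();
my $resend = initial_crypto_payload();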

#!/usr/bin/perl

# (C) Maxim Dounin

# Tests for nginx limit_req module.

###############################################################################

use warnings;
use strict;

use Test::More;

BEGIN { use FindBin; chdir($FindBin::Bin); }

use lib 'lib';
use Test::Nginx;

###############################################################################

select STDERR; $| = 1;
select STDOUT; $| = 1;

my $t = Test::Nginx->new()->has(qw/http limit_req/)->plan(6);

$t->write_file_expand('nginx.conf', <<'EOF');

%%TEST_GLOBALS%%

daemon off;

events {
}

http {
    %%TEST_GLOBALS_HTTP%%

    limit_req_zone  $binary_remote_addr  zone=one:1m   rate=2r/s;
    limit_req_zone  $binary_remote_addr  zone=long:1m  rate=2r/s;
    limit_req_zone  $binary_remote_addr  zone=fast:1m  rate=1000r/s;

    server {
        listen       127.0.0.1:8080;
        server_name  localhost;
        location / {
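            # zone 'one' at 2r/s: burst=1 nodelay admits one excess request
            # immediately and rejects anything above that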
            limit_req    zone=one  burst=1  nodelay;
        }
        location /status {
            limit_req    zone=one  burst=1  nodelay;

            limit_req_status  501;
        }
        location /long {
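            # zone 'long' at 2r/s without nodelay: up to 5 excess requests
            # are delayed rather than rejected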
            limit_req    zone=long  burst=5;
        }
        location /fast {
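            # zone 'fast' at 1000r/s: used for the negative excess test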
            limit_req    zone=fast  burst=1;
        }
    }
}

EOF

$t->write_file('test1.html', 'XtestX');
$t->write_file('long.html', "1234567890\n" x (1 << 16));
$t->write_file('fast.html', 'XtestX');
$t->run();

###############################################################################
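
# With rate=2r/s, burst=1 and nodelay in the 'one' zone, the first request
# should be served, an immediate second one admitted as burst excess, and a
# third immediate one rejected (503 by default, 501 under /status where
# limit_req_status overrides it).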

like(http_get('/test1.html'), qr/^HTTP\/1.. 200 /m, 'request');
http_get('/test1.html');
like(http_get('/test1.html'), qr/^HTTP\/1.. 503 /m, 'request rejected');
like(http_get('/status.html'), qr/^HTTP\/1.. 501 /m, 'request rejected status');
http_get('/test1.html');
http_get('/test1.html');

# The second request will be delayed by limit_req; make sure it isn't truncated.
# The bug only manifests itself if the buffer gets filled, so sleep for a while
# before reading the response.
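# (long.html is 11 * 65536 bytes, about 704 KiB, well above typical socket
# buffers; the 0.6s sleep below covers the roughly 1/rate = 0.5s delay the
# 'long' zone imposes on the second request)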

my $l1 = length(http_get('/long.html'));
my $l2 = length(http_get('/long.html', sleep => 0.6));
is($l2, $l1, 'delayed big request not truncated');

# make sure rejected requests are not counted, and access is again allowed
# after 1/rate seconds

like(http_get('/test1.html'), qr/^HTTP\/1.. 200 /m, 'rejects not counted');

# make sure negative excess values are handled properly
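# (the 'fast' zone allows 1000r/s, i.e. one request per millisecond, so after
# a 0.1s pause the computed excess would drop well below zero; the second
# request should still be accounted correctly and succeed)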

http_get('/fast.html');
select undef, undef, undef, 0.1;
like(http_get('/fast.html'), qr/^HTTP\/1.. 200 /m, 'negative excess');

###############################################################################