view limit_req.t @ 1897:38f1fd9ca3e6

Tests: unbreak reading new stderr data after eof.

Tests don't expect to stop reading redirected stderr when end of file
is reached, but rather to read new data being appended, similar to
"tail -f".  The behaviour is found changed in Ubuntu 23.04's Perl 5.36,
which applies the upstream patch [1] expected for inclusion in the
upcoming Perl 5.38.  The fix is to clear the filehandle's error state
to continue reading.

[1] https://github.com/Perl/perl5/commit/80c1f1e45e8e

Updated mail_error_log.t and stream_error_log.t for consistency.
author Sergey Kandaurov <pluknet@nginx.com>
date Mon, 29 May 2023 17:27:11 +0400
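
A minimal sketch of the "tail -f"-style read loop that the fix enables,
assuming IO::Handle's clearerr() and an illustrative log path (this is
not the test suite's actual code):

    use IO::Handle;

    open(my $fh, '<', 'error.log') or die "open: $!";  # hypothetical path

    for (1 .. 10) {
        # consume everything appended so far
        while (defined(my $line = <$fh>)) {
            print $line;
        }

        # Perl 5.36 with the upstream patch latches the handle's
        # EOF/error state; clear it so subsequent reads can see
        # newly appended data
        $fh->clearerr();

        select undef, undef, undef, 0.1;  # wait for more output
    }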

#!/usr/bin/perl

# (C) Maxim Dounin

# Tests for nginx limit_req module.

###############################################################################

use warnings;
use strict;

use Test::More;

BEGIN { use FindBin; chdir($FindBin::Bin); }

use lib 'lib';
use Test::Nginx;

###############################################################################

select STDERR; $| = 1;
select STDOUT; $| = 1;

my $t = Test::Nginx->new()->has(qw/http limit_req/)->plan(6);

# %%TEST_GLOBALS%% and %%TEST_GLOBALS_HTTP%% below are placeholders
# expanded by write_file_expand()

$t->write_file_expand('nginx.conf', <<'EOF');

%%TEST_GLOBALS%%

daemon off;

events {
}

http {
    %%TEST_GLOBALS_HTTP%%

    # three zones keyed by client address: "one" and "long" allow
    # 2 r/s (a new request every 0.5s), "fast" allows 1000 r/s
    # (every 1ms) for the negative excess test

    limit_req_zone  $binary_remote_addr  zone=one:1m   rate=2r/s;
    limit_req_zone  $binary_remote_addr  zone=long:1m  rate=2r/s;
    limit_req_zone  $binary_remote_addr  zone=fast:1m  rate=1000r/s;

    server {
        listen       127.0.0.1:8080;
        server_name  localhost;
        location / {
            limit_req    zone=one  burst=1  nodelay;
        }
        location /status {
            limit_req    zone=one  burst=1  nodelay;

            # return 501 instead of the default 503 on rejection
            limit_req_status  501;
        }
        location /long {
            limit_req    zone=long  burst=5;
        }
        location /fast {
            limit_req    zone=fast  burst=1;
        }
    }
}

EOF

$t->write_file('test1.html', 'XtestX');
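# ~704 KiB body, large enough to fill socket buffers (see the
# truncation test below)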
$t->write_file('long.html', "1234567890\n" x (1 << 16));
$t->write_file('fast.html', 'XtestX');
$t->run();

###############################################################################

like(http_get('/test1.html'), qr/^HTTP\/1.. 200 /m, 'request');
http_get('/test1.html');
like(http_get('/test1.html'), qr/^HTTP\/1.. 503 /m, 'request rejected');
like(http_get('/status.html'), qr/^HTTP\/1.. 501 /m, 'request rejected status');

# more rejected requests; these should not be counted against the
# limit (see the "rejects not counted" test below)

http_get('/test1.html');
http_get('/test1.html');

# The second request will be delayed by limit_req; make sure it isn't
# truncated.  The bug only manifests itself once the buffer is filled,
# so sleep for a while before reading the response.

my $l1 = length(http_get('/long.html'));
my $l2 = length(http_get('/long.html', sleep => 0.6));
is($l2, $l1, 'delayed big request not truncated');

# make sure rejected requests are not counted, and access is again
# allowed after 1/rate seconds (0.5s at rate=2r/s)

like(http_get('/test1.html'), qr/^HTTP\/1.. 200 /m, 'rejects not counted');

# make sure negative excess values are handled properly
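# (at 1000r/s the allowance accrues every millisecond, so the 0.1s
# pause below leaves the second request with a computed excess well
# below zero; nginx is expected to treat such values as zero rather
# than mishandle them)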

http_get('/fast.html');
select undef, undef, undef, 0.1;
like(http_get('/fast.html'), qr/^HTTP\/1.. 200 /m, 'negative excess');

###############################################################################