Mercurial > hg > nginx-tests
view h2_proxy_cache.t @ 1236:93f749c1d5c5
Tests: fixed parallel tests execution with UDP.
Previously, when checking ports availability, a UDP socket was always created
first, then a TCP socket was created. On success, one of UDP and TCP sockets
was closed (depending on the "udp" option) and the second one was used to busy
this port in other scripts. This led to the following problem: in an attempt
to reopen a UDP socket used in a given testing script it could be stolen by
another script as part of checking ports availability.
To solve this problem, UDP and TCP ports were split into two non-overlapping
ranges: TCP ports are only used in the range 8000-8499, and UDP ports - in
the range 8500-8999. In addition, the order of creating sockets in UDP tests
has been reversed: now a TCP socket used as a lock precedes a UDP socket.
author | Andrey Zelenkov <zelenkov@nginx.com> |
---|---|
date | Thu, 26 Oct 2017 18:00:21 +0300 |
parents | efccab043dd3 |
children | 93453d7858ce |
line wrap: on
line source
#!/usr/bin/perl

# (C) Sergey Kandaurov
# (C) Nginx, Inc.

# Tests for HTTP/2 protocol with cache.

###############################################################################

use warnings;
use strict;

use Test::More;

BEGIN { use FindBin; chdir($FindBin::Bin); }

use lib 'lib';
use Test::Nginx;
use Test::Nginx::HTTP2;

###############################################################################

# Unbuffer both STDERR and STDOUT so test output interleaves correctly.
select STDERR; $| = 1;
select STDOUT; $| = 1;

# Require http_v2, proxy, cache and rewrite modules; 12 tests planned below.
my $t = Test::Nginx->new()->has(qw/http http_v2 proxy cache rewrite/)->plan(12)
	->write_file_expand('nginx.conf', <<'EOF');

%%TEST_GLOBALS%%

daemon off;

events {
}

http {
    %%TEST_GLOBALS_HTTP%%

    proxy_cache_path %%TESTDIR%%/cache keys_zone=NAME:1m;

    # quit unfixed nginx timely on different linuces
    http2_idle_timeout 2s;
    http2_recv_timeout 2s;

    server {
        listen       127.0.0.1:8080 http2;
        listen       127.0.0.1:8081;
        server_name  localhost;

        location /cache {
            proxy_pass http://127.0.0.1:8081/;
            proxy_cache NAME;
            proxy_cache_valid 1m;
        }
        location /proxy_buffering_off {
            proxy_pass http://127.0.0.1:8081/;
            proxy_cache NAME;
            proxy_cache_valid 1m;
            proxy_buffering off;
        }
        location / {
            # Slow down the upstream response when requested, used by the
            # stream-cancellation test below.
            if ($arg_slow) {
                set $limit_rate 200;
            }
        }
    }
}

EOF

# Backend document served by the plain-HTTP listener on port 8081.
$t->write_file('t.html', 'SEE-THIS');
$t->run();

###############################################################################

# simple proxy cache test

my $s = Test::Nginx::HTTP2->new();
my $sid = $s->new_stream({ path => '/cache/t.html' });
my $frames = $s->read(all => [{ sid => $sid, fin => 1 }]);

my ($frame) = grep { $_->{type} eq "HEADERS" } @$frames;
is($frame->{headers}->{':status'}, '200', 'proxy cache');

# Remember the entity tag for the conditional request below.
my $etag = $frame->{headers}->{'etag'};

($frame) = grep { $_->{type} eq "DATA" } @$frames;
is($frame->{length}, length 'SEE-THIS', 'proxy cache - DATA');
is($frame->{data}, 'SEE-THIS', 'proxy cache - DATA payload');

# Change the file on disk; the cached response should still be served,
# so the conditional request with the old ETag is expected to match.
$t->write_file('t.html', 'NOOP');

$sid = $s->new_stream({ headers => [
	{ name => ':method', value => 'GET', mode => 0 },
	{ name => ':scheme', value => 'http', mode => 0 },
	{ name => ':path', value => '/cache/t.html' },
	{ name => ':authority', value => 'localhost', mode => 1 },
	{ name => 'if-none-match', value => $etag }]});
$frames = $s->read(all => [{ sid => $sid, fin => 1 }]);

($frame) = grep { $_->{type} eq "HEADERS" } @$frames;
is($frame->{headers}->{':status'}, 304, 'proxy cache conditional');

# Restore the original document for the remaining tests.
$t->write_file('t.html', 'SEE-THIS');

# request body with cached response

$sid = $s->new_stream({ path => '/cache/t.html', body_more => 1 });
$s->h2_body('TEST');
$frames = $s->read(all => [{ sid => $sid, fin => 1 }]);

($frame) = grep { $_->{type} eq "HEADERS" } @$frames;
is($frame->{headers}->{':status'}, 200, 'proxy cache - request body');

# PING round-trip confirms the connection is still usable after the
# request body was sent for a cached response; 0x1 is the ACK flag.
$s->h2_ping('SEE-THIS');
$frames = $s->read(all => [{ type => 'PING' }]);

($frame) = grep { $_->{type} eq "PING" && $_->{flags} & 0x1 } @$frames;
ok($frame, 'proxy cache - request body - next');

# HEADERS could be received with fin, followed by DATA

$s = Test::Nginx::HTTP2->new();
$sid = $s->new_stream({ path => '/cache/t.html?1', method => 'HEAD' });
$frames = $s->read(all => [{ sid => $sid, fin => 1 }], wait => 0.2);
# Second short read catches any stray frames sent after end of stream.
push @$frames, $_ for @{$s->read(all => [{ sid => $sid }], wait => 0.2)};

ok(!grep ({ $_->{type} eq "DATA" } @$frames), 'proxy cache HEAD - no body');

# proxy cache - expect no stray empty DATA frame

TODO: {
local $TODO = 'not yet';

$s = Test::Nginx::HTTP2->new();
$sid = $s->new_stream({ path => '/cache/t.html?2' });
$frames = $s->read(all => [{ sid => $sid, fin => 1 }]);
my @data = grep ({ $_->{type} eq "DATA" } @$frames);
is(@data, 1, 'proxy cache write - data frames');
is(join(' ', map { $_->{data} } @data), 'SEE-THIS',
	'proxy cache write - data');
is(join(' ', map { $_->{flags} } @data), '1', 'proxy cache write - flags');

}

# HEAD on empty cache with proxy_buffering off

$s = Test::Nginx::HTTP2->new();
$sid = $s->new_stream(
	{ path => '/proxy_buffering_off/t.html?1', method => 'HEAD' });
$frames = $s->read(all => [{ sid => $sid, fin => 1 }]);
push @$frames, $_ for @{$s->read(all => [{ sid => $sid }], wait => 0.2)};

ok(!grep ({ $_->{type} eq "DATA" } @$frames),
	'proxy cache HEAD buffering off - no body');

# client cancels stream with a cacheable request that was sent to upstream
# HEADERS should not be produced for the canceled stream

$s = Test::Nginx::HTTP2->new();
# $arg_slow triggers $limit_rate in the backend, keeping the upstream
# response in flight long enough for the RST_STREAM to arrive first.
$sid = $s->new_stream({ path => '/cache/t.html?slow=1' });
# Error code 8 is CANCEL per the HTTP/2 specification.
$s->h2_rst($sid, 8);

$frames = $s->read(all => [{ sid => $sid, fin => 0x4 }], wait => 1.2);
ok(!(grep { $_->{type} eq "HEADERS" } @$frames), 'no headers');

# client closes connection after sending a cacheable request producing alert

$s = Test::Nginx::HTTP2->new();
$sid = $s->new_stream({ path => '/cache/t.html?4' });

# Drop the client connection without reading the response, then give
# nginx a moment to process the abort before shutdown.
undef $s;
select undef, undef, undef, 0.2;

$t->stop();

###############################################################################