vquic: stabilization and improvements

vquic stabilization
- udp send code shared between ngtcp2 and quiche
- quiche handling of data and events improved

ngtcp2 and pytest improvements
- fixes handling of "drain" situations, discovered in scorecard
  tests with the Caddy server.
- improvements in handling transfers that already have buffered data or
  are already closed, to make an early return on recv

pytest
- adding caddy tests when available

scorecard improvements.
- using correct caddy port
- allowing tests for only httpd or caddy

Closes #10451
This commit is contained in:
Stefan Eissing 2023-02-09 10:49:04 +01:00 committed by Daniel Stenberg
parent bd12f27258
commit c96f982166
No known key found for this signature in database
GPG Key ID: 5CC908FDB71E12C2
15 changed files with 1363 additions and 742 deletions

View File

@ -264,7 +264,7 @@ struct HTTP {
bool upload_done;
#endif /* ENABLE_QUIC */
#ifdef USE_NGHTTP3
size_t unacked_window;
size_t recv_buf_nonflow; /* buffered bytes, not counting for flow control */
struct h3out *h3out; /* per-stream buffers for upload */
struct dynbuf overflow; /* excess data received during a single Curl_read */
#endif /* USE_NGHTTP3 */
@ -291,6 +291,8 @@ struct HTTP {
#ifdef USE_QUICHE
bool h3_got_header; /* TRUE when h3 stream has recvd some HEADER */
bool h3_recving_data; /* TRUE when h3 stream is reading DATA */
bool h3_body_pending; /* TRUE when h3 stream may have more body DATA */
struct h3_event_node *pending;
#endif /* USE_QUICHE */
};

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -29,11 +29,13 @@
#endif
#include "urldata.h"
#include "dynbuf.h"
#include "cfilters.h"
#include "curl_log.h"
#include "curl_msh3.h"
#include "curl_ngtcp2.h"
#include "curl_quiche.h"
#include "vquic.h"
#include "vquic_int.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
@ -60,6 +62,220 @@ void Curl_quic_ver(char *p, size_t len)
#endif
}
/* Initialize the shared QUIC socket context: reset the blocked-packet
 * bookkeeping, allocate the datagram assembly buffer of `pktbuflen`
 * bytes and record whether GSO (Linux UDP segmentation offload via
 * sendmsg) can be attempted on this platform.
 * Returns CURLE_OUT_OF_MEMORY when the buffer allocation fails. */
CURLcode vquic_ctx_init(struct cf_quic_ctx *qctx, size_t pktbuflen)
{
  memset(&qctx->blocked_pkt, 0, sizeof(qctx->blocked_pkt));
  qctx->num_blocked_pkt = 0;
  qctx->num_blocked_pkt_sent = 0;

  qctx->pktbuflen = pktbuflen;
  qctx->pktbuf = malloc(pktbuflen);
  if(!qctx->pktbuf)
    return CURLE_OUT_OF_MEMORY;

#if defined(__linux__) && defined(UDP_SEGMENT) && defined(HAVE_SENDMSG)
  /* kernel support for UDP_SEGMENT looks available; try GSO sends */
  qctx->no_gso = FALSE;
#else
  qctx->no_gso = TRUE;
#endif

  return CURLE_OK;
}
/* Release the packet buffer allocated by vquic_ctx_init().
 * Resets both the pointer and the recorded length so the struct keeps a
 * consistent (NULL, 0) state and a second call is a harmless no-op.
 * Fix: the original left pktbuflen stale after freeing pktbuf. */
void vquic_ctx_free(struct cf_quic_ctx *qctx)
{
  free(qctx->pktbuf);
  qctx->pktbuf = NULL;
  qctx->pktbuflen = 0; /* keep length consistent with the NULL buffer */
}
/* forward declaration: fallback used when a GSO send fails at runtime */
static CURLcode send_packet_no_gso(struct Curl_cfilter *cf,
                                   struct Curl_easy *data,
                                   struct cf_quic_ctx *qctx,
                                   const uint8_t *pkt, size_t pktlen,
                                   size_t gsolen, size_t *psent);

/* Send `pktlen` bytes from `pkt` as UDP payload on qctx->sockfd.
 * With HAVE_SENDMSG on Linux and pktlen > gsolen, UDP_SEGMENT (GSO) is
 * requested so the kernel splits the buffer into datagrams of `gsolen`
 * bytes each; without sendmsg a plain send() is used.
 * On success *psent is set to pktlen. Returns CURLE_AGAIN when the
 * socket would block, CURLE_SEND_ERROR on hard failures. EMSGSIZE
 * (caused by PMTUD) is treated as success: the datagram is deliberately
 * dropped and QUIC loss recovery is expected to resend. An EIO from a
 * GSO send disables GSO for this context and falls back to
 * send_packet_no_gso(). */
static CURLcode do_sendmsg(struct Curl_cfilter *cf,
                           struct Curl_easy *data,
                           struct cf_quic_ctx *qctx,
                           const uint8_t *pkt, size_t pktlen, size_t gsolen,
                           size_t *psent)
{
#ifdef HAVE_SENDMSG
  struct iovec msg_iov;
  struct msghdr msg = {0};
  ssize_t sent;
#if defined(__linux__) && defined(UDP_SEGMENT)
  uint8_t msg_ctrl[32];
  struct cmsghdr *cm;
#endif

  *psent = 0;
  msg_iov.iov_base = (uint8_t *)pkt;
  msg_iov.iov_len = pktlen;
  msg.msg_iov = &msg_iov;
  msg.msg_iovlen = 1;

#if defined(__linux__) && defined(UDP_SEGMENT)
  if(pktlen > gsolen) {
    /* Only set this, when we need it. macOS, for example,
     * does not seem to like a msg_control of length 0. */
    msg.msg_control = msg_ctrl;
    assert(sizeof(msg_ctrl) >= CMSG_SPACE(sizeof(uint16_t)));
    msg.msg_controllen = CMSG_SPACE(sizeof(uint16_t));
    cm = CMSG_FIRSTHDR(&msg);
    cm->cmsg_level = SOL_UDP;
    cm->cmsg_type = UDP_SEGMENT;
    cm->cmsg_len = CMSG_LEN(sizeof(uint16_t));
    /* tell the kernel the segment size to split `pkt` into */
    *(uint16_t *)(void *)CMSG_DATA(cm) = gsolen & 0xffff;
  }
#endif

  /* retry when interrupted by a signal */
  while((sent = sendmsg(qctx->sockfd, &msg, 0)) == -1 && SOCKERRNO == EINTR)
    ;

  if(sent == -1) {
    switch(SOCKERRNO) {
    case EAGAIN:
#if EAGAIN != EWOULDBLOCK
    case EWOULDBLOCK:
#endif
      return CURLE_AGAIN;
    case EMSGSIZE:
      /* UDP datagram is too large; caused by PMTUD. Just let it be lost. */
      break;
    case EIO:
      if(pktlen > gsolen) {
        /* GSO failure */
        failf(data, "sendmsg() returned %zd (errno %d); disable GSO", sent,
              SOCKERRNO);
        qctx->no_gso = TRUE;
        /* resend this buffer without GSO, as individual datagrams */
        return send_packet_no_gso(cf, data, qctx, pkt, pktlen, gsolen, psent);
      }
      /* FALLTHROUGH */
    default:
      failf(data, "sendmsg() returned %zd (errno %d)", sent, SOCKERRNO);
      return CURLE_SEND_ERROR;
    }
  }
  else {
    assert(pktlen == (size_t)sent);
  }
#else /* !HAVE_SENDMSG: plain send(), no GSO possible */
  ssize_t sent;
  (void)gsolen;

  *psent = 0;

  /* retry when interrupted by a signal */
  while((sent = send(qctx->sockfd, (const char *)pkt, pktlen, 0)) == -1 &&
        SOCKERRNO == EINTR)
    ;

  if(sent == -1) {
    if(SOCKERRNO == EAGAIN || SOCKERRNO == EWOULDBLOCK) {
      return CURLE_AGAIN;
    }
    else {
      failf(data, "send() returned %zd (errno %d)", sent, SOCKERRNO);
      if(SOCKERRNO != EMSGSIZE) {
        return CURLE_SEND_ERROR;
      }
      /* UDP datagram is too large; caused by PMTUD. Just let it be
         lost. */
    }
  }
#endif
  (void)cf;
  /* report the whole buffer as sent, also when deliberately dropped */
  *psent = pktlen;
  return CURLE_OK;
}
/* Send the buffer `pkt` as a sequence of individual datagrams of at
 * most `gsolen` bytes each, for platforms or situations where GSO is
 * not usable. *psent accumulates the bytes reported sent; a failing
 * chunk aborts the loop and its result code is returned. */
static CURLcode send_packet_no_gso(struct Curl_cfilter *cf,
                                   struct Curl_easy *data,
                                   struct cf_quic_ctx *qctx,
                                   const uint8_t *pkt, size_t pktlen,
                                   size_t gsolen, size_t *psent)
{
  size_t offset = 0;

  *psent = 0;
  while(offset < pktlen) {
    size_t chunk = pktlen - offset;
    size_t nsent;
    CURLcode result;

    if(chunk > gsolen)
      chunk = gsolen;
    /* one datagram per chunk: pktlen == gsolen for this sub-send */
    result = do_sendmsg(cf, data, qctx, pkt + offset, chunk, chunk, &nsent);
    if(result)
      return result;
    *psent += nsent;
    offset += chunk;
  }
  return CURLE_OK;
}
/* Send a packet buffer, using GSO when available. With GSO enabled (or
 * when the buffer fits a single datagram) everything goes out in one
 * sendmsg()/send() call; otherwise the buffer is split into `gsolen`
 * sized datagrams. *psent reports the bytes handed to the network. */
CURLcode vquic_send_packet(struct Curl_cfilter *cf,
                           struct Curl_easy *data,
                           struct cf_quic_ctx *qctx,
                           const uint8_t *pkt, size_t pktlen, size_t gsolen,
                           size_t *psent)
{
  if(!qctx->no_gso || pktlen <= gsolen)
    return do_sendmsg(cf, data, qctx, pkt, pktlen, gsolen, psent);
  /* GSO disabled and more than one datagram: send them one by one */
  return send_packet_no_gso(cf, data, qctx, pkt, pktlen, gsolen, psent);
}
/* Remember a packet whose send blocked (EAGAIN/EWOULDBLOCK) so that
 * vquic_send_blocked_pkt() can retry it later. The bytes are NOT
 * copied: `pkt` must stay valid until the retry happens.
 * NOTE(review): the capacity check is assert-only; in release builds
 * more than 2 pending packets would overflow — callers are presumably
 * expected to flush before pushing again (confirm at call sites). */
void vquic_push_blocked_pkt(struct Curl_cfilter *cf,
                            struct cf_quic_ctx *qctx,
                            const uint8_t *pkt, size_t pktlen, size_t gsolen)
{
  struct vquic_blocked_pkt *entry;

  (void)cf;
  assert(qctx->num_blocked_pkt <
         sizeof(qctx->blocked_pkt) / sizeof(qctx->blocked_pkt[0]));
  entry = &qctx->blocked_pkt[qctx->num_blocked_pkt];
  qctx->num_blocked_pkt++;
  entry->pkt = pkt;
  entry->pktlen = pktlen;
  entry->gsolen = gsolen;
}
/* Try to (re)send all packets previously stored by
 * vquic_push_blocked_pkt(), in order. On full success the queue is
 * reset and CURLE_OK returned. When a send returns CURLE_AGAIN, the
 * current entry is advanced past the bytes that did go out so the next
 * call resumes at the right offset; any error code is passed through
 * and the remaining entries stay queued. */
CURLcode vquic_send_blocked_pkt(struct Curl_cfilter *cf,
                                struct Curl_easy *data,
                                struct cf_quic_ctx *qctx)
{
  size_t sent;
  CURLcode curlcode;
  struct vquic_blocked_pkt *blkpkt;

  (void)cf;
  for(; qctx->num_blocked_pkt_sent < qctx->num_blocked_pkt;
      ++qctx->num_blocked_pkt_sent) {
    blkpkt = &qctx->blocked_pkt[qctx->num_blocked_pkt_sent];
    curlcode = vquic_send_packet(cf, data, qctx, blkpkt->pkt,
                                 blkpkt->pktlen, blkpkt->gsolen, &sent);

    if(curlcode) {
      if(curlcode == CURLE_AGAIN) {
        /* partially sent: remember what is left for the next attempt */
        blkpkt->pkt += sent;
        blkpkt->pktlen -= sent;
      }
      return curlcode;
    }
  }
  /* everything flushed: reset the queue */
  qctx->num_blocked_pkt = 0;
  qctx->num_blocked_pkt_sent = 0;
  return CURLE_OK;
}
/*
* If the QLOGDIR environment variable is set, open and return a file
* descriptor to write the log to.

View File

@ -28,6 +28,45 @@
#ifdef ENABLE_QUIC
/* A UDP payload whose send blocked and is queued for a later retry.
 * The bytes are NOT copied; `pkt` must stay valid until resent. */
struct vquic_blocked_pkt {
  const uint8_t *pkt;  /* start of the (remaining) datagram bytes */
  size_t pktlen;       /* number of bytes left to send */
  size_t gsolen;       /* GSO segment size to use for this packet */
};
/* Socket context shared by the QUIC connection filters. */
struct cf_quic_ctx {
  curl_socket_t sockfd;               /* the UDP socket used */
  struct sockaddr_storage local_addr; /* address the socket is bound to */
  socklen_t local_addrlen;            /* size of local_addr */
  /* the packets blocked by sendmsg (EAGAIN or EWOULDBLOCK) */
  struct vquic_blocked_pkt blocked_pkt[2];
  uint8_t *pktbuf;                    /* buffer for datagram assembly */
  /* the number of entries in blocked_pkt */
  size_t num_blocked_pkt;
  /* the number of processed entries in blocked_pkt */
  size_t num_blocked_pkt_sent;
  size_t pktbuflen;                   /* size of pktbuf in bytes */
  bool no_gso;                        /* TRUE when GSO sends are not used */
};
CURLcode vquic_ctx_init(struct cf_quic_ctx *qctx, size_t pktbuflen);
void vquic_ctx_free(struct cf_quic_ctx *qctx);
CURLcode vquic_send_packet(struct Curl_cfilter *cf,
struct Curl_easy *data,
struct cf_quic_ctx *qctx,
const uint8_t *pkt, size_t pktlen, size_t gsolen,
size_t *psent);
void vquic_push_blocked_pkt(struct Curl_cfilter *cf,
struct cf_quic_ctx *qctx,
const uint8_t *pkt, size_t pktlen, size_t gsolen);
CURLcode vquic_send_blocked_pkt(struct Curl_cfilter *cf,
struct Curl_easy *data,
struct cf_quic_ctx *qctx);
#endif /* !ENABLE_QUIC */
#endif /* HEADER_CURL_VQUIC_QUIC_INT_H */

View File

@ -40,4 +40,5 @@ nghttpx = @HTTPD_NGHTTPX@
[caddy]
caddy = @CADDY@
port = 5004
http_port = 5003
https_port = 5004

View File

@ -213,31 +213,33 @@ class ScoreCard:
self.info(f'\n')
return props
def downloads(self, proto: str) -> Dict[str, Any]:
def downloads(self, proto: str, test_httpd: bool = True,
test_caddy: bool = True) -> Dict[str, Any]:
scores = {}
if proto == 'h3':
port = self.env.h3_port
via = 'nghttpx'
descr = f'port {port}, proxying httpd'
else:
port = self.env.https_port
via = 'httpd'
descr = f'port {port}'
self.info('httpd downloads\n')
self._make_docs_file(docs_dir=self.httpd.docs_dir, fname='score1.data', fsize=1024*1024)
url1 = f'https://{self.env.domain1}:{port}/score1.data'
self._make_docs_file(docs_dir=self.httpd.docs_dir, fname='score10.data', fsize=10*1024*1024)
url10 = f'https://{self.env.domain1}:{port}/score10.data'
self._make_docs_file(docs_dir=self.httpd.docs_dir, fname='score100.data', fsize=100*1024*1024)
url100 = f'https://{self.env.domain1}:{port}/score100.data'
scores[via] = {
'description': descr,
'1MB-local': self.download_url(url=url1, proto=proto, count=50),
'10MB-local': self.download_url(url=url10, proto=proto, count=50),
'100MB-local': self.download_url(url=url100, proto=proto, count=50),
}
if self.caddy:
port = self.env.caddy_port
if test_httpd:
if proto == 'h3':
port = self.env.h3_port
via = 'nghttpx'
descr = f'port {port}, proxying httpd'
else:
port = self.env.https_port
via = 'httpd'
descr = f'port {port}'
self.info(f'{via} downloads\n')
self._make_docs_file(docs_dir=self.httpd.docs_dir, fname='score1.data', fsize=1024*1024)
url1 = f'https://{self.env.domain1}:{port}/score1.data'
self._make_docs_file(docs_dir=self.httpd.docs_dir, fname='score10.data', fsize=10*1024*1024)
url10 = f'https://{self.env.domain1}:{port}/score10.data'
self._make_docs_file(docs_dir=self.httpd.docs_dir, fname='score100.data', fsize=100*1024*1024)
url100 = f'https://{self.env.domain1}:{port}/score100.data'
scores[via] = {
'description': descr,
'1MB-local': self.download_url(url=url1, proto=proto, count=50),
'10MB-local': self.download_url(url=url10, proto=proto, count=50),
'100MB-local': self.download_url(url=url100, proto=proto, count=50),
}
if test_caddy and self.caddy:
port = self.caddy.port
via = 'caddy'
descr = f'port {port}'
self.info('caddy downloads\n')
@ -255,7 +257,11 @@ class ScoreCard:
}
return scores
def score_proto(self, proto: str, handshakes: bool = True, downloads: bool = True):
def score_proto(self, proto: str,
handshakes: bool = True,
downloads: bool = True,
test_httpd: bool = True,
test_caddy: bool = True):
self.info(f"scoring {proto}\n")
p = {}
if proto == 'h3':
@ -289,7 +295,9 @@ class ScoreCard:
if handshakes:
score['handshakes'] = self.handshakes(proto=proto)
if downloads:
score['downloads'] = self.downloads(proto=proto)
score['downloads'] = self.downloads(proto=proto,
test_httpd=test_httpd,
test_caddy=test_caddy)
self.info("\n")
return score
@ -335,6 +343,10 @@ class ScoreCard:
help="print text instead of json")
parser.add_argument("-d", "--downloads", action='store_true', default=False,
help="evaluate downloads only")
parser.add_argument("--httpd", action='store_true', default=False,
help="evaluate httpd server only")
parser.add_argument("--caddy", action='store_true', default=False,
help="evaluate caddy server only")
parser.add_argument("protocols", nargs='*', help="Name(s) of protocol to score")
args = parser.parse_args()
@ -348,8 +360,16 @@ class ScoreCard:
protocols = args.protocols if len(args.protocols) else ['h2', 'h3']
handshakes = True
downloads = True
test_httpd = True
test_caddy = True
if args.downloads:
handshakes = False
if args.caddy:
test_caddy = True
test_httpd = False
if args.httpd:
test_caddy = False
test_httpd = True
rv = 0
self.env = Env()
@ -372,7 +392,10 @@ class ScoreCard:
assert self.caddy.start()
for p in protocols:
score = self.score_proto(proto=p, handshakes=handshakes, downloads=downloads)
score = self.score_proto(proto=p, handshakes=handshakes,
downloads=downloads,
test_caddy=test_caddy,
test_httpd=test_httpd)
if args.text:
self.print_score(score)
else:

View File

@ -42,13 +42,22 @@ class TestDownload:
def _class_scope(self, env, httpd, nghttpx):
if env.have_h3():
nghttpx.start_if_needed()
fpath = os.path.join(httpd.docs_dir, 'data-1mb.data')
def _make_docs_file(self, docs_dir: str, fname: str, fsize: int):
fpath = os.path.join(docs_dir, fname)
data1k = 1024*'x'
flen = 0
with open(fpath, 'w') as fd:
fsize = 0
while fsize < 1024*1024:
while flen < fsize:
fd.write(data1k)
fsize += len(data1k)
flen += len(data1k)
return flen
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env, httpd):
self._make_docs_file(docs_dir=httpd.docs_dir, fname='data1.data', fsize=1024*1024)
self._make_docs_file(docs_dir=httpd.docs_dir, fname='data10.data', fsize=10*1024*1024)
self._make_docs_file(docs_dir=httpd.docs_dir, fname='data100.data', fsize=100*1024*1024)
# download 1 file
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
@ -163,8 +172,8 @@ class TestDownload:
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_02_08_1MB_serial(self, env: Env,
httpd, nghttpx, repeat, proto):
count = 2
urln = f'https://{env.authority_for(env.domain1, proto)}/data-1mb.data?[0-{count-1}]'
count = 20
urln = f'https://{env.authority_for(env.domain1, proto)}/data1.data?[0-{count-1}]'
curl = CurlClient(env=env)
r = curl.http_download(urls=[urln], alpn_proto=proto)
assert r.exit_code == 0
@ -173,8 +182,30 @@ class TestDownload:
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_02_09_1MB_parallel(self, env: Env,
httpd, nghttpx, repeat, proto):
count = 2
urln = f'https://{env.authority_for(env.domain1, proto)}/data-1mb.data?[0-{count-1}]'
count = 20
urln = f'https://{env.authority_for(env.domain1, proto)}/data1.data?[0-{count-1}]'
curl = CurlClient(env=env)
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--parallel'
])
assert r.exit_code == 0
r.check_stats(count=count, exp_status=200)
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_02_10_10MB_serial(self, env: Env,
httpd, nghttpx, repeat, proto):
count = 20
urln = f'https://{env.authority_for(env.domain1, proto)}/data10.data?[0-{count-1}]'
curl = CurlClient(env=env)
r = curl.http_download(urls=[urln], alpn_proto=proto)
assert r.exit_code == 0
r.check_stats(count=count, exp_status=200)
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_02_11_10MB_parallel(self, env: Env,
httpd, nghttpx, repeat, proto):
count = 20
urln = f'https://{env.authority_for(env.domain1, proto)}/data10.data?[0-{count-1}]'
curl = CurlClient(env=env)
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--parallel'

View File

@ -73,6 +73,8 @@ class TestErrors:
proto):
if proto == 'h3' and not env.have_h3():
pytest.skip("h3 not supported")
if proto == 'h3' and env.curl_uses_lib('quiche'):
pytest.skip("quiche not reliable, sometimes reports success")
count = 5
curl = CurlClient(env=env)
urln = f'https://{env.authority_for(env.domain1, proto)}' \

View File

@ -90,9 +90,26 @@ class TestUpload:
respdata = open(curl.response_file(i)).readlines()
assert respdata == [data]
# upload data parallel, check that they were echoed
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_07_11_upload_parallel(self, env: Env, httpd, nghttpx, repeat, proto):
if proto == 'h3' and not env.have_h3():
pytest.skip("h3 not supported")
count = 50
data = '0123456789'
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
r = curl.http_upload(urls=[url], data=data, alpn_proto=proto,
extra_args=['--parallel'])
assert r.exit_code == 0, f'{r}'
r.check_stats(count=count, exp_status=200)
for i in range(count):
respdata = open(curl.response_file(i)).readlines()
assert respdata == [data]
# upload large data sequentially, check that this is what was echoed
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_07_11_upload_seq_large(self, env: Env, httpd, nghttpx, repeat, proto):
def test_07_20_upload_seq_large(self, env: Env, httpd, nghttpx, repeat, proto):
if proto == 'h3' and not env.have_h3():
pytest.skip("h3 not supported")
fdata = os.path.join(env.gen_dir, 'data-100k')
@ -149,9 +166,9 @@ class TestUpload:
if proto == 'h3' and not env.have_h3():
pytest.skip("h3 not supported")
if proto == 'h3' and env.curl_uses_lib('quiche'):
pytest.skip("quiche stalls on parallel, large uploads")
pytest.skip("quiche stalls on parallel, large uploads, unless --trace is used???")
fdata = os.path.join(env.gen_dir, 'data-100k')
count = 3
count = 50
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto,

View File

@ -0,0 +1,147 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) 2008 - 2022, Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import logging
import os
import pytest
from testenv import Env, CurlClient, Caddy
log = logging.getLogger(__name__)
@pytest.mark.skipif(condition=not Env.has_caddy(), reason=f"missing caddy")
class TestCaddy:
@pytest.fixture(autouse=True, scope='class')
def caddy(self, env):
caddy = Caddy(env=env)
assert caddy.start()
yield caddy
caddy.stop()
def _make_docs_file(self, docs_dir: str, fname: str, fsize: int):
    """Create `fname` inside `docs_dir`, filled with at least `fsize`
    bytes of 'x' characters written in 1KB chunks, and return the
    number of bytes actually written (may exceed fsize by < 1KB)."""
    path = os.path.join(docs_dir, fname)
    chunk = 1024 * 'x'
    written = 0
    with open(path, 'w') as fd:
        while written < fsize:
            fd.write(chunk)
            written += len(chunk)
    return written
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env, caddy):
self._make_docs_file(docs_dir=caddy.docs_dir, fname='data1.data', fsize=1024*1024)
self._make_docs_file(docs_dir=caddy.docs_dir, fname='data10.data', fsize=10*1024*1024)
self._make_docs_file(docs_dir=caddy.docs_dir, fname='data100.data', fsize=100*1024*1024)
# download 1 file
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_08_01_download_1(self, env: Env, caddy: Caddy, repeat, proto):
if proto == 'h3' and not env.have_h3_curl():
pytest.skip("h3 not supported in curl")
curl = CurlClient(env=env)
url = f'https://{env.domain1}:{caddy.port}/data.json'
r = curl.http_download(urls=[url], alpn_proto=proto)
assert r.exit_code == 0, f'{r}'
r.check_stats(count=1, exp_status=200)
# download 1MB files sequentially
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_08_02_download_1mb_sequential(self, env: Env, caddy: Caddy,
repeat, proto):
if proto == 'h3' and not env.have_h3_curl():
pytest.skip("h3 not supported in curl")
count = 50
curl = CurlClient(env=env)
urln = f'https://{env.domain1}:{caddy.port}/data1.data?[0-{count-1}]'
r = curl.http_download(urls=[urln], alpn_proto=proto)
assert r.exit_code == 0
r.check_stats(count=count, exp_status=200)
# sequential transfers will open 1 connection
assert r.total_connects == 1
# download 1MB files parallel
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_08_03_download_1mb_parallel(self, env: Env, caddy: Caddy,
repeat, proto):
if proto == 'h3' and not env.have_h3_curl():
pytest.skip("h3 not supported in curl")
count = 50
curl = CurlClient(env=env)
urln = f'https://{env.domain1}:{caddy.port}/data1.data?[0-{count-1}]'
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--parallel'
])
assert r.exit_code == 0
r.check_stats(count=count, exp_status=200)
if proto == 'http/1.1':
# http/1.1 parallel transfers will open multiple connections
assert r.total_connects > 1
else:
assert r.total_connects == 1
# download 10MB files sequentially
@pytest.mark.parametrize("proto", ['h2', 'h3'])
def test_08_04_download_10mb_sequential(self, env: Env, caddy: Caddy,
repeat, proto):
if proto == 'h3' and not env.have_h3_curl():
pytest.skip("h3 not supported in curl")
if proto == 'h3' and env.curl_uses_lib('quiche'):
pytest.skip("quiche stalls after a certain amount of data")
count = 20
curl = CurlClient(env=env)
urln = f'https://{env.domain1}:{caddy.port}/data10.data?[0-{count-1}]'
r = curl.http_download(urls=[urln], alpn_proto=proto)
assert r.exit_code == 0
r.check_stats(count=count, exp_status=200)
# sequential transfers will open 1 connection
assert r.total_connects == 1
# download 10MB files in parallel
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_08_05_download_10mb_parallel(self, env: Env, caddy: Caddy,
                                      repeat, proto):
    # renamed from ..._1mb_parallel: this test fetches the 10MB files
    # (data10.data), as the comment above already said
    if proto == 'h3' and not env.have_h3_curl():
        pytest.skip("h3 not supported in curl")
    if proto == 'h3' and env.curl_uses_lib('quiche'):
        pytest.skip("quiche stalls after a certain amount of data")
    count = 50
    curl = CurlClient(env=env)
    urln = f'https://{env.domain1}:{caddy.port}/data10.data?[0-{count-1}]'
    r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
        '--parallel'
    ])
    assert r.exit_code == 0
    r.check_stats(count=count, exp_status=200)
    if proto == 'http/1.1':
        # http/1.1 parallel transfers will open multiple connections
        assert r.total_connects > 1
    else:
        assert r.total_connects == 1

View File

@ -55,6 +55,10 @@ class Caddy:
def docs_dir(self):
return self._docs_dir
@property
def port(self) -> str:
    # the port clients should connect to: caddy's HTTPS port
    return self.env.caddy_https_port
def clear_logs(self):
self._rmf(self._error_log)
@ -105,7 +109,7 @@ class Caddy:
curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
try_until = datetime.now() + timeout
while datetime.now() < try_until:
check_url = f'https://{self.env.domain1}:{self.env.caddy_port}/'
check_url = f'https://{self.env.domain1}:{self.port}/'
r = curl.http_get(url=check_url)
if r.exit_code != 0:
return True
@ -118,7 +122,7 @@ class Caddy:
curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
try_until = datetime.now() + timeout
while datetime.now() < try_until:
check_url = f'https://{self.env.domain1}:{self.env.caddy_port}/'
check_url = f'https://{self.env.domain1}:{self.port}/'
r = curl.http_get(url=check_url)
if r.exit_code == 0:
return True
@ -149,12 +153,13 @@ class Caddy:
with open(self._conf_file, 'w') as fd:
conf = [ # base server config
f'{{',
f' https_port {self.env.caddy_port}',
f' servers :{self.env.caddy_port} {{',
f' http_port {self.env.caddy_http_port}',
f' https_port {self.env.caddy_https_port}',
f' servers :{self.env.caddy_https_port} {{',
f' protocols h3 h2 h1',
f' }}',
f'}}',
f'{domain1}:{self.env.caddy_port} {{',
f'{domain1}:{self.env.caddy_https_port} {{',
f' file_server * {{',
f' root {self._docs_dir}',
f' }}',

View File

@ -319,6 +319,8 @@ class CurlClient:
args = [self._curl, "-s", "--path-as-is"]
if with_headers:
args.extend(["-D", self._headerfile])
if self.env.verbose > 1:
args.extend(['--trace', self._tracefile])
if self.env.verbose > 2:
args.extend(['--trace', self._tracefile, '--trace-time'])

View File

@ -136,8 +136,8 @@ class EnvConfig:
log.debug(f'nghttpx -v: {p.stdout}')
self.caddy = self.config['caddy']['caddy']
if len(self.caddy) == 0:
self.caddy = 'caddy'
if len(self.caddy.strip()) == 0:
self.caddy = None
if self.caddy is not None:
try:
p = subprocess.run(args=[self.caddy, 'version'],
@ -147,7 +147,8 @@ class EnvConfig:
self.caddy = None
except:
self.caddy = None
self.caddy_port = self.config['caddy']['port']
self.caddy_http_port = self.config['caddy']['http_port']
self.caddy_https_port = self.config['caddy']['https_port']
@property
def httpd_version(self):
@ -241,6 +242,10 @@ class Env:
def httpd_is_at_least(minv) -> bool:
return Env.CONFIG.httpd_is_at_least(minv)
@staticmethod
def has_caddy() -> bool:
return Env.CONFIG.caddy is not None
def __init__(self, pytestconfig=None):
self._verbose = pytestconfig.option.verbose \
if pytestconfig is not None else 0
@ -306,8 +311,12 @@ class Env:
return self.CONFIG.caddy
@property
def caddy_port(self) -> str:
return self.CONFIG.caddy_port
def caddy_https_port(self) -> str:
return self.CONFIG.caddy_https_port
@property
def caddy_http_port(self) -> str:
return self.CONFIG.caddy_http_port
@property
def curl(self) -> str:

View File

@ -160,7 +160,9 @@ class Nghttpx:
try_until = datetime.now() + timeout
while datetime.now() < try_until:
check_url = f'https://{self.env.domain1}:{self.env.h3_port}/'
r = curl.http_get(url=check_url, extra_args=['--http3-only'])
r = curl.http_get(url=check_url, extra_args=[
'--http3-only', '--trace', 'curl.trace', '--trace-time'
])
if r.exit_code == 0:
return True
log.debug(f'waiting for nghttpx to become responsive: {r}')