proxy: http2 proxy tunnel implementation

- currently only in debug builds and when the environment variable
  CURL_PROXY_TUNNEL_H2 is set.
- will ALPN negotiate with the proxy server and switch
  tunnel filter based on the protocol negotiated.
- http/1.1 tunnel code moved into cf-h1-proxy.[ch]
- http/2 tunnel code implemented in cf-h2-proxy.[ch]
- tunnel start and ALPN set remains in http_proxy.c
- moving all haproxy-related code into cf-haproxy.[ch]

VTLS changes
- SSL filters rely solely on the "alpn" specification they
  are created with and no longer check conn->bits.tls_enable_alpn.
- checks on which ALPN specification to use (or none at all) are
  done in vtls.c when creating the filter.

Testing
- added a nghttpx forward proxy to the pytest setup that
  speaks HTTP/2 and forwards all requests to the Apache httpd
  forward proxy server.
- extending test coverage in test_10 cases
- adding proxy tests for direct/tunnel h1/h2 use of basic auth.
- adding test for http/1.1 and h2 proxy tunneling to pytest

Closes #10780
This commit is contained in:
Stefan Eissing 2023-04-06 09:54:57 +02:00 committed by Daniel Stenberg
parent 8f50e393ab
commit 4ae2d9f24d
No known key found for this signature in database
GPG Key ID: 5CC908FDB71E12C2
44 changed files with 3923 additions and 1732 deletions

View File

@ -108,6 +108,9 @@ LIB_CFILES = \
bufq.c \
bufref.c \
c-hyper.c \
cf-h1-proxy.c \
cf-h2-proxy.c \
cf-haproxy.c \
cf-https-connect.c \
cf-socket.c \
cfilters.c \
@ -235,6 +238,9 @@ LIB_HFILES = \
bufq.h \
bufref.h \
c-hyper.h \
cf-h1-proxy.h \
cf-h2-proxy.h \
cf-haproxy.h \
cf-https-connect.h \
cf-socket.h \
cfilters.h \

1186
lib/cf-h1-proxy.c Normal file

File diff suppressed because it is too large Load Diff

39
lib/cf-h1-proxy.h Normal file
View File

@ -0,0 +1,39 @@
#ifndef HEADER_CURL_H1_PROXY_H
#define HEADER_CURL_H1_PROXY_H
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"

#if !defined(CURL_DISABLE_PROXY) && !defined(CURL_DISABLE_HTTP)

/* Insert an HTTP/1.1 proxy tunnel filter into the chain, directly
 * after the filter `cf`. */
CURLcode Curl_cf_h1_proxy_insert_after(struct Curl_cfilter *cf,
                                       struct Curl_easy *data);

/* Connection filter type for the HTTP/1.1 proxy tunnel. */
extern struct Curl_cftype Curl_cft_h1_proxy;

#endif /* !CURL_DISABLE_PROXY && !CURL_DISABLE_HTTP */

#endif /* HEADER_CURL_H1_PROXY_H */

1388
lib/cf-h2-proxy.c Normal file

File diff suppressed because it is too large Load Diff

39
lib/cf-h2-proxy.h Normal file
View File

@ -0,0 +1,39 @@
#ifndef HEADER_CURL_H2_PROXY_H
#define HEADER_CURL_H2_PROXY_H
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"

/* The h2 tunnel needs nghttp2; only built when it is available. */
#if defined(USE_NGHTTP2) && !defined(CURL_DISABLE_PROXY)

/* Insert an HTTP/2 proxy tunnel filter into the chain, directly
 * after the filter `cf`. */
CURLcode Curl_cf_h2_proxy_insert_after(struct Curl_cfilter *cf,
                                       struct Curl_easy *data);

/* Connection filter type for the HTTP/2 proxy tunnel. */
extern struct Curl_cftype Curl_cft_h2_proxy;

#endif /* defined(USE_NGHTTP2) && !defined(CURL_DISABLE_PROXY) */

#endif /* HEADER_CURL_H2_PROXY_H */

262
lib/cf-haproxy.c Normal file
View File

@ -0,0 +1,262 @@
/***************************************************************************
* _ _ ____ _
* Project ___| | | | _ \| |
* / __| | | | |_) | |
* | (__| |_| | _ <| |___
* \___|\___/|_| \_\_____|
*
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
*
* This software is licensed as described in the file COPYING, which
* you should have received as part of this distribution. The terms
* are also available at https://curl.se/docs/copyright.html.
*
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
* copies of the Software, and permit persons to whom the Software is
* furnished to do so, under the terms of the COPYING file.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
* SPDX-License-Identifier: curl
*
***************************************************************************/
#include "curl_setup.h"
#if !defined(CURL_DISABLE_PROXY)
#include <curl/curl.h>
#include "urldata.h"
#include "cfilters.h"
#include "cf-haproxy.h"
#include "curl_log.h"
#include "multiif.h"
/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"
/* States of the haproxy filter while sending its PROXY header. */
typedef enum {
  HAPROXY_INIT, /* init/default/no tunnel state */
  HAPROXY_SEND, /* data_out being sent */
  HAPROXY_DONE  /* all work done */
} haproxy_state;

/* Per-filter context, stored in cf->ctx. */
struct cf_haproxy_ctx {
  int state;              /* a `haproxy_state` value */
  struct dynbuf data_out; /* PROXY header bytes not yet sent */
};
/* Return the context to its pristine state so the filter may be
 * connected again: pending output dropped, state back to INIT. */
static void cf_haproxy_ctx_reset(struct cf_haproxy_ctx *ctx)
{
  DEBUGASSERT(ctx);
  Curl_dyn_reset(&ctx->data_out);
  ctx->state = HAPROXY_INIT;
}
/* Release a context and everything it owns. NULL is a no-op. */
static void cf_haproxy_ctx_free(struct cf_haproxy_ctx *ctx)
{
  if(!ctx)
    return;
  Curl_dyn_free(&ctx->data_out);
  free(ctx);
}
/* Build the haproxy PROXY protocol v1 header line into ctx->data_out.
 * For unix domain sockets there is no meaningful address, so the
 * "PROXY UNKNOWN" form is emitted instead.
 * NOTE(review): "date_out_set" looks like a typo for "data_out_set";
 * renaming would touch the caller as well. */
static CURLcode cf_haproxy_date_out_set(struct Curl_cfilter*cf,
                                        struct Curl_easy *data)
{
  struct cf_haproxy_ctx *ctx = cf->ctx;
  CURLcode result;
  const char *tcp_version;

  DEBUGASSERT(ctx);
  DEBUGASSERT(ctx->state == HAPROXY_INIT);
#ifdef USE_UNIX_SOCKETS
  if(cf->conn->unix_domain_socket)
    /* the buffer is large enough to hold this! */
    result = Curl_dyn_addn(&ctx->data_out, STRCONST("PROXY UNKNOWN\r\n"));
  else {
#endif /* USE_UNIX_SOCKETS */
  /* Emit the correct prefix for IPv6 */
  tcp_version = cf->conn->bits.ipv6 ? "TCP6" : "TCP4";
  /* "PROXY <TCP4|TCP6> <src ip> <dst ip> <src port> <dst port>\r\n" */
  result = Curl_dyn_addf(&ctx->data_out, "PROXY %s %s %s %i %i\r\n",
                         tcp_version,
                         data->info.conn_local_ip,
                         data->info.conn_primary_ip,
                         data->info.conn_local_port,
                         data->info.conn_primary_port);
#ifdef USE_UNIX_SOCKETS
  }
#endif /* USE_UNIX_SOCKETS */
  return result;
}
/* Filter connect: first let the lower filters connect, then send the
 * PROXY protocol header before reporting this filter as connected.
 * Returns CURLE_OK with *done == FALSE while work remains (e.g. a
 * partial send); callers invoke this again until *done is TRUE. */
static CURLcode cf_haproxy_connect(struct Curl_cfilter *cf,
                                   struct Curl_easy *data,
                                   bool blocking, bool *done)
{
  struct cf_haproxy_ctx *ctx = cf->ctx;
  CURLcode result;
  size_t len;

  DEBUGASSERT(ctx);
  if(cf->connected) {
    *done = TRUE;
    return CURLE_OK;
  }

  /* the filters beneath us must be connected first */
  result = cf->next->cft->connect(cf->next, data, blocking, done);
  if(result || !*done)
    return result;

  switch(ctx->state) {
  case HAPROXY_INIT:
    /* format the PROXY header once, then move on to sending it */
    result = cf_haproxy_date_out_set(cf, data);
    if(result)
      goto out;
    ctx->state = HAPROXY_SEND;
    /* FALLTHROUGH */
  case HAPROXY_SEND:
    len = Curl_dyn_len(&ctx->data_out);
    if(len > 0) {
      ssize_t written = Curl_conn_send(data, cf->sockindex,
                                       Curl_dyn_ptr(&ctx->data_out),
                                       len, &result);
      if(written < 0)
        goto out;
      /* drop what was sent; a partial send leaves a remainder for
       * the next invocation */
      Curl_dyn_tail(&ctx->data_out, len - (size_t)written);
      if(Curl_dyn_len(&ctx->data_out) > 0) {
        result = CURLE_OK;
        goto out;
      }
    }
    ctx->state = HAPROXY_DONE;
    /* FALLTHROUGH */
  default:
    /* all sent: the output buffer is no longer needed */
    Curl_dyn_free(&ctx->data_out);
    break;
  }
out:
  *done = (!result) && (ctx->state == HAPROXY_DONE);
  cf->connected = *done;
  return result;
}
/* Destroy hook: free the filter's context. */
static void cf_haproxy_destroy(struct Curl_cfilter *cf,
                               struct Curl_easy *data)
{
  (void)data; /* unused when debug logging is compiled out */
  DEBUGF(LOG_CF(data, cf, "destroy"));
  cf_haproxy_ctx_free(cf->ctx);
}
/* Close hook: reset our state for a possible reconnect, then pass
 * the close down the chain. */
static void cf_haproxy_close(struct Curl_cfilter *cf,
                             struct Curl_easy *data)
{
  DEBUGF(LOG_CF(data, cf, "close"));
  cf_haproxy_ctx_reset(cf->ctx);
  cf->connected = FALSE;
  if(cf->next)
    cf->next->cft->close(cf->next, data);
}
/* Report the sockets to monitor. Defer to the lower filters; when
 * they are connected and idle while we still have our header to
 * send, ask to be polled for writability. */
static int cf_haproxy_get_select_socks(struct Curl_cfilter *cf,
                                       struct Curl_easy *data,
                                       curl_socket_t *socks)
{
  int fds = cf->next->cft->get_select_socks(cf->next, data, socks);

  if(fds || !cf->next->connected || cf->connected)
    return fds;

  /* If we are not connected, but the filter "below" is
   * and not waiting on something, we are sending. */
  socks[0] = Curl_conn_cf_get_socket(cf, data);
  return GETSOCK_WRITESOCK(0);
}
/* Filter type descriptor for the haproxy filter. Positional
 * initializer for struct Curl_cftype; operations this filter does not
 * specialize use the Curl_cf_def_* defaults (plain pass-through).
 * NOTE(review): the two leading 0 fields are numeric members of
 * Curl_cftype (flags etc.) -- confirm against cfilters.h. */
struct Curl_cftype Curl_cft_haproxy = {
  "HAPROXY", /* filter name, shown in debug logs */
  0,
  0,
  cf_haproxy_destroy,
  cf_haproxy_connect,
  cf_haproxy_close,
  Curl_cf_def_get_host,
  cf_haproxy_get_select_socks,
  Curl_cf_def_data_pending,
  Curl_cf_def_send,
  Curl_cf_def_recv,
  Curl_cf_def_cntrl,
  Curl_cf_def_conn_is_alive,
  Curl_cf_def_conn_keep_alive,
  Curl_cf_def_query,
};
/* Allocate a haproxy filter instance with a fresh context.
 * On success, *pcf holds the new filter; on failure, *pcf is NULL,
 * all partially created state is freed and an error is returned. */
static CURLcode cf_haproxy_create(struct Curl_cfilter **pcf,
                                  struct Curl_easy *data)
{
  struct Curl_cfilter *cf = NULL;
  struct cf_haproxy_ctx *ctx;
  CURLcode result;

  (void)data;
  /* calloc(nmemb, size): original had the arguments swapped, which is
   * harmless but backwards with respect to calloc's signature */
  ctx = calloc(1, sizeof(*ctx));
  if(!ctx) {
    result = CURLE_OUT_OF_MEMORY;
    goto out;
  }
  ctx->state = HAPROXY_INIT;
  Curl_dyn_init(&ctx->data_out, DYN_HAXPROXY);

  result = Curl_cf_create(&cf, &Curl_cft_haproxy, ctx);
  if(result)
    goto out;
  ctx = NULL; /* ownership moved into the filter */

out:
  cf_haproxy_ctx_free(ctx);
  *pcf = result? NULL : cf;
  return result;
}
/* Create a haproxy filter and append it to the connection's filter
 * chain at `sockindex`. */
CURLcode Curl_conn_haproxy_add(struct Curl_easy *data,
                               struct connectdata *conn,
                               int sockindex)
{
  struct Curl_cfilter *cf;
  CURLcode result = cf_haproxy_create(&cf, data);

  if(!result)
    Curl_conn_cf_add(data, conn, sockindex, cf);
  return result;
}
/* Create a haproxy filter and insert it directly after `cf_at`. */
CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at,
                                      struct Curl_easy *data)
{
  struct Curl_cfilter *cf;
  CURLcode result = cf_haproxy_create(&cf, data);

  if(!result)
    Curl_conn_cf_insert_after(cf_at, cf);
  return result;
}
#endif /* !CURL_DISABLE_PROXY */

43
lib/cf-haproxy.h Normal file
View File

@ -0,0 +1,43 @@
#ifndef HEADER_CURL_CF_HAPROXY_H
#define HEADER_CURL_CF_HAPROXY_H
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"
#include "urldata.h"

#if !defined(CURL_DISABLE_PROXY)

/* Append a haproxy PROXY protocol filter to the connection's filter
 * chain at `sockindex`. */
CURLcode Curl_conn_haproxy_add(struct Curl_easy *data,
                               struct connectdata *conn,
                               int sockindex);

/* Insert a haproxy PROXY protocol filter directly after `cf_at`. */
CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at,
                                      struct Curl_easy *data);

/* Connection filter type for the haproxy PROXY protocol filter. */
extern struct Curl_cftype Curl_cft_haproxy;

#endif /* !CURL_DISABLE_PROXY */

#endif /* HEADER_CURL_CF_HAPROXY_H */

View File

@ -283,21 +283,31 @@ void Curl_conn_cf_insert_after(struct Curl_cfilter *cf_at,
*pnext = tail;
}
void Curl_conn_cf_discard(struct Curl_cfilter *cf, struct Curl_easy *data)
bool Curl_conn_cf_discard_sub(struct Curl_cfilter *cf,
struct Curl_cfilter *discard,
struct Curl_easy *data,
bool destroy_always)
{
struct Curl_cfilter **pprev = &cf->conn->cfilter[cf->sockindex];
struct Curl_cfilter **pprev = &cf->next;
bool found = FALSE;
/* remove from chain if still in there */
/* remove from sub-chain and destroy */
DEBUGASSERT(cf);
while (*pprev) {
if (*pprev == cf) {
*pprev = cf->next;
*pprev = discard->next;
discard->next = NULL;
found = TRUE;
break;
}
pprev = &((*pprev)->next);
}
cf->cft->destroy(cf, data);
free(cf);
if(found || destroy_always) {
discard->next = NULL;
discard->cft->destroy(discard, data);
free(discard);
}
return found;
}
CURLcode Curl_conn_cf_connect(struct Curl_cfilter *cf,
@ -405,10 +415,8 @@ bool Curl_conn_is_ip_connected(struct Curl_easy *data, int sockindex)
return FALSE;
}
bool Curl_conn_is_ssl(struct connectdata *conn, int sockindex)
bool Curl_conn_cf_is_ssl(struct Curl_cfilter *cf)
{
struct Curl_cfilter *cf = conn? conn->cfilter[sockindex] : NULL;
for(; cf; cf = cf->next) {
if(cf->cft->flags & CF_TYPE_SSL)
return TRUE;
@ -418,6 +426,11 @@ bool Curl_conn_is_ssl(struct connectdata *conn, int sockindex)
return FALSE;
}
bool Curl_conn_is_ssl(struct connectdata *conn, int sockindex)
{
return conn? Curl_conn_cf_is_ssl(conn->cfilter[sockindex]) : FALSE;
}
bool Curl_conn_is_multiplex(struct connectdata *conn, int sockindex)
{
struct Curl_cfilter *cf = conn? conn->cfilter[sockindex] : NULL;

View File

@ -254,11 +254,16 @@ void Curl_conn_cf_insert_after(struct Curl_cfilter *cf_at,
struct Curl_cfilter *cf_new);
/**
* Discard, e.g. remove and destroy a specific filter instance.
* If the filter is attached to a connection, it will be removed before
* it is destroyed.
* Discard, e.g. remove and destroy `discard` iff
* it still is in the filter chain below `cf`. If `discard`
* is no longer found beneath `cf` return FALSE.
* if `destroy_always` is TRUE, will call `discard`s destroy
* function and free it even if not found in the subchain.
*/
void Curl_conn_cf_discard(struct Curl_cfilter *cf, struct Curl_easy *data);
bool Curl_conn_cf_discard_sub(struct Curl_cfilter *cf,
struct Curl_cfilter *discard,
struct Curl_easy *data,
bool destroy_always);
/**
* Discard all cfilters starting with `*pcf` and clearing it afterwards.
@ -292,6 +297,12 @@ CURLcode Curl_conn_cf_cntrl(struct Curl_cfilter *cf,
bool ignore_result,
int event, int arg1, void *arg2);
/**
* Determine if the connection filter chain is using SSL to the remote host
* (or will be once connected).
*/
bool Curl_conn_cf_is_ssl(struct Curl_cfilter *cf);
/**
* Get the socket used by the filter chain starting at `cf`.
* Returns CURL_SOCKET_BAD if not available.

View File

@ -59,6 +59,7 @@
#include "strerror.h"
#include "cfilters.h"
#include "connect.h"
#include "cf-haproxy.h"
#include "cf-https-connect.h"
#include "cf-socket.h"
#include "select.h"

View File

@ -38,6 +38,9 @@
#include "connect.h"
#include "http2.h"
#include "http_proxy.h"
#include "cf-h1-proxy.h"
#include "cf-h2-proxy.h"
#include "cf-haproxy.h"
#include "cf-https-connect.h"
#include "socks.h"
#include "strtok.h"
@ -160,6 +163,10 @@ static struct Curl_cftype *cf_types[] = {
#endif
#if !defined(CURL_DISABLE_PROXY)
#if !defined(CURL_DISABLE_HTTP)
&Curl_cft_h1_proxy,
#ifdef USE_NGHTTP2
&Curl_cft_h2_proxy,
#endif
&Curl_cft_http_proxy,
#endif /* !CURL_DISABLE_HTTP */
&Curl_cft_haproxy,

File diff suppressed because it is too large Load Diff

View File

@ -25,34 +25,25 @@
***************************************************************************/
#include "curl_setup.h"
#if !defined(CURL_DISABLE_PROXY) && !defined(CURL_DISABLE_HTTP)
#include "urldata.h"
#if !defined(CURL_DISABLE_PROXY)
#if !defined(CURL_DISABLE_HTTP)
/* Default proxy timeout in milliseconds */
#define PROXY_TIMEOUT (3600*1000)
CURLcode Curl_conn_http_proxy_add(struct Curl_easy *data,
struct connectdata *conn,
int sockindex);
void Curl_cf_http_proxy_get_host(struct Curl_cfilter *cf,
struct Curl_easy *data,
const char **phost,
const char **pdisplay_host,
int *pport);
CURLcode Curl_cf_http_proxy_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data);
extern struct Curl_cftype Curl_cft_http_proxy;
#endif /* !CURL_DISABLE_HTTP */
CURLcode Curl_conn_haproxy_add(struct Curl_easy *data,
struct connectdata *conn,
int sockindex);
CURLcode Curl_cf_haproxy_insert_after(struct Curl_cfilter *cf_at,
struct Curl_easy *data);
extern struct Curl_cftype Curl_cft_haproxy;
#endif /* !CURL_DISABLE_PROXY */
#endif /* !CURL_DISABLE_PROXY && !CURL_DISABLE_HTTP */
#endif /* HEADER_CURL_HTTP_PROXY_H */

View File

@ -134,6 +134,7 @@ typedef unsigned int curl_prot_t;
#include "hash.h"
#include "splay.h"
#include "dynbuf.h"
#include "dynhds.h"
/* return the count of bytes sent, or -1 on error */
typedef ssize_t (Curl_send)(struct Curl_easy *data, /* transfer */
@ -1066,6 +1067,9 @@ struct connectdata {
(ftp) */
unsigned char alpn; /* APLN TLS negotiated protocol, a CURL_HTTP_VERSION*
value */
#ifndef CURL_DISABLE_PROXY
unsigned char proxy_alpn; /* APLN of proxy tunnel, CURL_HTTP_VERSION* */
#endif
unsigned char transport; /* one of the TRNSPRT_* defines */
unsigned char ip_version; /* copied from the Curl_easy at creation time */
unsigned char httpversion; /* the HTTP version*10 reported by the server */

View File

@ -1090,33 +1090,6 @@ static int cb_h3_deferred_consume(nghttp3_conn *conn, int64_t stream3_id,
return 0;
}
/* Decode HTTP status code. Returns -1 if no valid status code was
decoded. (duplicate from http2.c) */
static int decode_status_code(const uint8_t *value, size_t len)
{
int i;
int res;
if(len != 3) {
return -1;
}
res = 0;
for(i = 0; i < 3; ++i) {
char c = value[i];
if(c < '0' || c > '9') {
return -1;
}
res *= 10;
res += c - '0';
}
return res;
}
static int cb_h3_end_headers(nghttp3_conn *conn, int64_t stream_id,
int fin, void *user_data, void *stream_user_data)
{
@ -1167,8 +1140,10 @@ static int cb_h3_recv_header(nghttp3_conn *conn, int64_t stream_id,
char line[14]; /* status line is always 13 characters long */
size_t ncopy;
stream->status_code = decode_status_code(h3val.base, h3val.len);
DEBUGASSERT(stream->status_code != -1);
result = Curl_http_decode_status(&stream->status_code,
(const char *)h3val.base, h3val.len);
if(result)
return -1;
ncopy = msnprintf(line, sizeof(line), "HTTP/3 %03d \r\n",
stream->status_code);
DEBUGF(LOG_CF(data, cf, "[h3sid=%" PRId64 "] status: %s",
@ -2539,7 +2514,7 @@ out:
*pcf = (!result)? cf : NULL;
if(result) {
if(udp_cf)
Curl_conn_cf_discard(udp_cf, data);
Curl_conn_cf_discard_sub(cf, udp_cf, data, TRUE);
Curl_safefree(cf);
Curl_safefree(ctx);
}

View File

@ -1547,7 +1547,7 @@ out:
*pcf = (!result)? cf : NULL;
if(result) {
if(udp_cf)
Curl_conn_cf_discard(udp_cf, data);
Curl_conn_cf_discard_sub(cf, udp_cf, data, TRUE);
Curl_safefree(cf);
Curl_safefree(ctx);
}

View File

@ -849,7 +849,7 @@ static CURLcode bearssl_connect_step3(struct Curl_cfilter *cf,
DEBUGASSERT(ssl_connect_3 == connssl->connecting_state);
DEBUGASSERT(backend);
if(cf->conn->bits.tls_enable_alpn) {
if(connssl->alpn) {
const char *proto;
proto = br_ssl_engine_get_selected_protocol(&backend->ctx.eng);

View File

@ -1252,7 +1252,7 @@ static CURLcode gtls_verifyserver(struct Curl_cfilter *cf,
if(result)
goto out;
if(cf->conn->bits.tls_enable_alpn) {
if(connssl->alpn) {
gnutls_datum_t proto;
int rc;

View File

@ -852,14 +852,13 @@ static void HandshakeCallback(PRFileDesc *sock, void *arg)
struct Curl_cfilter *cf = (struct Curl_cfilter *)arg;
struct ssl_connect_data *connssl = cf->ctx;
struct Curl_easy *data = connssl->backend->data;
struct connectdata *conn = cf->conn;
unsigned int buflenmax = 50;
unsigned char buf[50];
unsigned int buflen;
SSLNextProtoState state;
DEBUGASSERT(data);
if(!conn->bits.tls_enable_alpn) {
if(!connssl->alpn) {
return;
}
@ -2096,7 +2095,7 @@ static CURLcode nss_setup_connect(struct Curl_cfilter *cf,
#ifdef SSL_ENABLE_ALPN
if(SSL_OptionSet(backend->handle, SSL_ENABLE_ALPN,
cf->conn->bits.tls_enable_alpn ? PR_TRUE : PR_FALSE)
connssl->alpn ? PR_TRUE : PR_FALSE)
!= SECSuccess)
goto error;
#endif

View File

@ -3950,7 +3950,7 @@ static CURLcode ossl_connect_step2(struct Curl_cfilter *cf,
/* Sets data and len to negotiated protocol, len is 0 if no protocol was
* negotiated
*/
if(cf->conn->bits.tls_enable_alpn) {
if(connssl->alpn) {
const unsigned char *neg_protocol;
unsigned int len;
SSL_get0_alpn_selected(backend->handle, &neg_protocol, &len);

View File

@ -2796,7 +2796,7 @@ check_handshake:
}
#if(CURL_BUILD_MAC_10_13 || CURL_BUILD_IOS_11) && HAVE_BUILTIN_AVAILABLE == 1
if(cf->conn->bits.tls_enable_alpn) {
if(connssl->alpn) {
if(__builtin_available(macOS 10.13.4, iOS 11, tvOS 11, *)) {
CFArrayRef alpnArr = NULL;
CFStringRef chosenProtocol = NULL;

View File

@ -130,6 +130,33 @@ static bool blobcmp(struct curl_blob *first, struct curl_blob *second)
return !memcmp(first->data, second->data, first->len); /* same data */
}
#ifdef USE_SSL
static const struct alpn_spec ALPN_SPEC_H10 = {
{ ALPN_HTTP_1_0 }, 1
};
static const struct alpn_spec ALPN_SPEC_H11 = {
{ ALPN_HTTP_1_1 }, 1
};
#ifdef USE_HTTP2
static const struct alpn_spec ALPN_SPEC_H2_H11 = {
{ ALPN_H2, ALPN_HTTP_1_1 }, 2
};
#endif
/* Select the ALPN specification to offer in the TLS handshake for the
 * desired HTTP version, or NULL when ALPN is disabled.
 * - HTTP/1.0 wanted: offer "http/1.0" only
 * - HTTP/2 (or later) wanted and built with nghttp2: offer "h2" with
 *   "http/1.1" as fallback
 * - otherwise: offer "http/1.1" only */
static const struct alpn_spec *alpn_get_spec(int httpwant, bool use_alpn)
{
  if(!use_alpn)
    return NULL;
  if(httpwant == CURL_HTTP_VERSION_1_0)
    return &ALPN_SPEC_H10;
#ifdef USE_HTTP2
  if(httpwant >= CURL_HTTP_VERSION_2)
    return &ALPN_SPEC_H2_H11;
#endif
  return &ALPN_SPEC_H11;
}
#endif /* USE_SSL */
bool
Curl_ssl_config_matches(struct ssl_primary_config *data,
@ -291,7 +318,7 @@ static bool ssl_prefs_check(struct Curl_easy *data)
}
static struct ssl_connect_data *cf_ctx_new(struct Curl_easy *data,
const struct alpn_spec *alpn)
const struct alpn_spec *alpn)
{
struct ssl_connect_data *ctx;
@ -1733,7 +1760,8 @@ static CURLcode cf_ssl_create(struct Curl_cfilter **pcf,
DEBUGASSERT(data->conn);
ctx = cf_ctx_new(data, Curl_alpn_get_spec(data, conn));
ctx = cf_ctx_new(data, alpn_get_spec(data->state.httpwant,
conn->bits.tls_enable_alpn));
if(!ctx) {
result = CURLE_OUT_OF_MEMORY;
goto out;
@ -1774,6 +1802,7 @@ CURLcode Curl_cf_ssl_insert_after(struct Curl_cfilter *cf_at,
}
#ifndef CURL_DISABLE_PROXY
static CURLcode cf_ssl_proxy_create(struct Curl_cfilter **pcf,
struct Curl_easy *data,
struct connectdata *conn)
@ -1781,8 +1810,17 @@ static CURLcode cf_ssl_proxy_create(struct Curl_cfilter **pcf,
struct Curl_cfilter *cf = NULL;
struct ssl_connect_data *ctx;
CURLcode result;
bool use_alpn = conn->bits.tls_enable_alpn;
int httpwant = CURL_HTTP_VERSION_1_1;
ctx = cf_ctx_new(data, Curl_alpn_get_proxy_spec(data, conn));
#if defined(USE_HTTP2) && defined(DEBUGBUILD)
if(conn->bits.tunnel_proxy && getenv("CURL_PROXY_TUNNEL_H2")) {
use_alpn = TRUE;
httpwant = CURL_HTTP_VERSION_2;
}
#endif
ctx = cf_ctx_new(data, alpn_get_spec(httpwant, use_alpn));
if(!ctx) {
result = CURLE_OUT_OF_MEMORY;
goto out;
@ -1851,15 +1889,16 @@ void *Curl_ssl_get_internals(struct Curl_easy *data, int sockindex,
CURLcode Curl_ssl_cfilter_remove(struct Curl_easy *data,
int sockindex)
{
struct Curl_cfilter *cf = data->conn? data->conn->cfilter[sockindex] : NULL;
struct Curl_cfilter *cf, *head;
CURLcode result = CURLE_OK;
(void)data;
for(; cf; cf = cf->next) {
head = data->conn? data->conn->cfilter[sockindex] : NULL;
for(cf = head; cf; cf = cf->next) {
if(cf->cft == &Curl_cft_ssl) {
if(Curl_ssl->shut_down(cf, data))
result = CURLE_SSL_SHUTDOWN_FAILED;
Curl_conn_cf_discard(cf, data);
Curl_conn_cf_discard_sub(head, cf, data, FALSE);
break;
}
}
@ -1943,42 +1982,6 @@ struct Curl_cfilter *Curl_ssl_cf_get_ssl(struct Curl_cfilter *cf)
return NULL;
}
static const struct alpn_spec ALPN_SPEC_H10 = {
{ ALPN_HTTP_1_0 }, 1
};
static const struct alpn_spec ALPN_SPEC_H11 = {
{ ALPN_HTTP_1_1 }, 1
};
#ifdef USE_HTTP2
static const struct alpn_spec ALPN_SPEC_H2_H11 = {
{ ALPN_H2, ALPN_HTTP_1_1 }, 2
};
#endif
const struct alpn_spec *
Curl_alpn_get_spec(struct Curl_easy *data, struct connectdata *conn)
{
if(!conn->bits.tls_enable_alpn)
return NULL;
if(data->state.httpwant == CURL_HTTP_VERSION_1_0)
return &ALPN_SPEC_H10;
#ifdef USE_HTTP2
if(data->state.httpwant >= CURL_HTTP_VERSION_2)
return &ALPN_SPEC_H2_H11;
#endif
return &ALPN_SPEC_H11;
}
const struct alpn_spec *
Curl_alpn_get_proxy_spec(struct Curl_easy *data, struct connectdata *conn)
{
if(!conn->bits.tls_enable_alpn)
return NULL;
if(data->state.httpwant == CURL_HTTP_VERSION_1_0)
return &ALPN_SPEC_H10;
return &ALPN_SPEC_H11;
}
CURLcode Curl_alpn_to_proto_buf(struct alpn_proto_buf *buf,
const struct alpn_spec *spec)
{
@ -2031,32 +2034,34 @@ CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf,
size_t proto_len)
{
int can_multi = 0;
unsigned char *palpn = Curl_ssl_cf_is_proxy(cf)?
&cf->conn->proxy_alpn : &cf->conn->alpn;
if(proto && proto_len) {
if(proto_len == ALPN_HTTP_1_1_LENGTH &&
!memcmp(ALPN_HTTP_1_1, proto, ALPN_HTTP_1_1_LENGTH)) {
cf->conn->alpn = CURL_HTTP_VERSION_1_1;
*palpn = CURL_HTTP_VERSION_1_1;
}
else if(proto_len == ALPN_HTTP_1_0_LENGTH &&
!memcmp(ALPN_HTTP_1_0, proto, ALPN_HTTP_1_0_LENGTH)) {
cf->conn->alpn = CURL_HTTP_VERSION_1_0;
*palpn = CURL_HTTP_VERSION_1_0;
}
#ifdef USE_HTTP2
else if(proto_len == ALPN_H2_LENGTH &&
!memcmp(ALPN_H2, proto, ALPN_H2_LENGTH)) {
cf->conn->alpn = CURL_HTTP_VERSION_2;
*palpn = CURL_HTTP_VERSION_2;
can_multi = 1;
}
#endif
#ifdef USE_HTTP3
else if(proto_len == ALPN_H3_LENGTH &&
!memcmp(ALPN_H3, proto, ALPN_H3_LENGTH)) {
cf->conn->alpn = CURL_HTTP_VERSION_3;
*palpn = CURL_HTTP_VERSION_3;
can_multi = 1;
}
#endif
else {
cf->conn->alpn = CURL_HTTP_VERSION_NONE;
*palpn = CURL_HTTP_VERSION_NONE;
failf(data, "unsupported ALPN protocol: '%.*s'", (int)proto_len, proto);
/* TODO: do we want to fail this? Previous code just ignored it and
* some vtls backends even ignore the return code of this function. */
@ -2066,12 +2071,14 @@ CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf,
infof(data, VTLS_INFOF_ALPN_ACCEPTED_LEN_1STR, (int)proto_len, proto);
}
else {
cf->conn->alpn = CURL_HTTP_VERSION_NONE;
*palpn = CURL_HTTP_VERSION_NONE;
infof(data, VTLS_INFOF_NO_ALPN);
}
out:
Curl_multiuse_state(data, can_multi? BUNDLE_MULTIPLEX : BUNDLE_NO_MULTIUSE);
if(!Curl_ssl_cf_is_proxy(cf))
Curl_multiuse_state(data, can_multi?
BUNDLE_MULTIPLEX : BUNDLE_NO_MULTIUSE);
return CURLE_OK;
}

View File

@ -65,58 +65,6 @@ CURLsslset Curl_init_sslset_nolock(curl_sslbackend id, const char *name,
#define CURL_SHA256_DIGEST_LENGTH 32 /* fixed size */
#endif
/* see https://www.iana.org/assignments/tls-extensiontype-values/ */
#define ALPN_HTTP_1_1_LENGTH 8
#define ALPN_HTTP_1_1 "http/1.1"
#define ALPN_HTTP_1_0_LENGTH 8
#define ALPN_HTTP_1_0 "http/1.0"
#define ALPN_H2_LENGTH 2
#define ALPN_H2 "h2"
#define ALPN_H3_LENGTH 2
#define ALPN_H3 "h3"
/* conservative sizes on the ALPN entries and count we are handling,
* we can increase these if we ever feel the need or have to accommodate
* ALPN strings from the "outside". */
#define ALPN_NAME_MAX 10
#define ALPN_ENTRIES_MAX 3
#define ALPN_PROTO_BUF_MAX (ALPN_ENTRIES_MAX * (ALPN_NAME_MAX + 1))
struct alpn_spec {
const char entries[ALPN_ENTRIES_MAX][ALPN_NAME_MAX];
size_t count; /* number of entries */
};
struct alpn_proto_buf {
unsigned char data[ALPN_PROTO_BUF_MAX];
int len;
};
CURLcode Curl_alpn_to_proto_buf(struct alpn_proto_buf *buf,
const struct alpn_spec *spec);
CURLcode Curl_alpn_to_proto_str(struct alpn_proto_buf *buf,
const struct alpn_spec *spec);
CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf,
struct Curl_easy *data,
const unsigned char *proto,
size_t proto_len);
/**
* Get the ALPN specification to use for talking to remote host.
* May return NULL if ALPN is disabled on the connection.
*/
const struct alpn_spec *
Curl_alpn_get_spec(struct Curl_easy *data, struct connectdata *conn);
/**
* Get the ALPN specification to use for talking to the proxy.
* May return NULL if ALPN is disabled on the connection.
*/
const struct alpn_spec *
Curl_alpn_get_proxy_spec(struct Curl_easy *data, struct connectdata *conn);
char *Curl_ssl_snihost(struct Curl_easy *data, const char *host, size_t *olen);
bool Curl_ssl_config_matches(struct ssl_primary_config *data,
struct ssl_primary_config *needle);

View File

@ -29,17 +29,55 @@
#ifdef USE_SSL
/* see https://www.iana.org/assignments/tls-extensiontype-values/ */
#define ALPN_HTTP_1_1_LENGTH 8
#define ALPN_HTTP_1_1 "http/1.1"
#define ALPN_HTTP_1_0_LENGTH 8
#define ALPN_HTTP_1_0 "http/1.0"
#define ALPN_H2_LENGTH 2
#define ALPN_H2 "h2"
#define ALPN_H3_LENGTH 2
#define ALPN_H3 "h3"
/* conservative sizes on the ALPN entries and count we are handling,
* we can increase these if we ever feel the need or have to accommodate
* ALPN strings from the "outside". */
#define ALPN_NAME_MAX 10
#define ALPN_ENTRIES_MAX 3
#define ALPN_PROTO_BUF_MAX (ALPN_ENTRIES_MAX * (ALPN_NAME_MAX + 1))
struct alpn_spec {
const char entries[ALPN_ENTRIES_MAX][ALPN_NAME_MAX];
size_t count; /* number of entries */
};
struct alpn_proto_buf {
unsigned char data[ALPN_PROTO_BUF_MAX];
int len;
};
CURLcode Curl_alpn_to_proto_buf(struct alpn_proto_buf *buf,
const struct alpn_spec *spec);
CURLcode Curl_alpn_to_proto_str(struct alpn_proto_buf *buf,
const struct alpn_spec *spec);
CURLcode Curl_alpn_set_negotiated(struct Curl_cfilter *cf,
struct Curl_easy *data,
const unsigned char *proto,
size_t proto_len);
/* Information in each SSL cfilter context: cf->ctx */
struct ssl_connect_data {
ssl_connection_state state;
ssl_connect_state connecting_state;
char *hostname; /* hostname for verification */
char *dispname; /* display version of hostname */
int port; /* remote port at origin */
const struct alpn_spec *alpn; /* ALPN to use or NULL for none */
struct ssl_backend_data *backend; /* vtls backend specific props */
struct cf_call_data call_data; /* data handle used in current call */
struct curltime handshake_done; /* time when handshake finished */
int port; /* remote port at origin */
BIT(use_alpn); /* if ALPN shall be used in handshake */
};

View File

@ -854,7 +854,7 @@ wolfssl_connect_step2(struct Curl_cfilter *cf, struct Curl_easy *data)
}
#ifdef HAVE_ALPN
if(cf->conn->bits.tls_enable_alpn) {
if(connssl->alpn) {
int rc;
char *protocol = NULL;
unsigned short protocol_len = 0;

View File

@ -31,8 +31,7 @@ import pytest
sys.path.append(os.path.join(os.path.dirname(__file__), '.'))
from testenv import Env, Nghttpx, Httpd
from testenv import Env, Nghttpx, Httpd, NghttpxQuic, NghttpxFwd
@pytest.fixture(scope="package")
def env(pytestconfig) -> Env:
@ -68,7 +67,16 @@ def httpd(env) -> Httpd:
@pytest.fixture(scope='package')
def nghttpx(env, httpd) -> Optional[Nghttpx]:
nghttpx = Nghttpx(env=env)
nghttpx = NghttpxQuic(env=env)
if env.have_h3():
nghttpx.clear_logs()
assert nghttpx.start()
yield nghttpx
nghttpx.stop()
@pytest.fixture(scope='package')
def nghttpx_fwd(env, httpd) -> Optional[Nghttpx]:
nghttpx = NghttpxFwd(env=env)
if env.have_h3():
nghttpx.clear_logs()
assert nghttpx.start()

View File

@ -35,7 +35,6 @@ from typing import Dict, Any, Optional, List
from testenv import Env, Httpd, Nghttpx, CurlClient, Caddy, ExecResult
log = logging.getLogger(__name__)

View File

@ -46,37 +46,34 @@ class TestBasic:
curl = CurlClient(env=env)
url = f'http://{env.domain1}:{env.http_port}/data.json'
r = curl.http_get(url=url)
r.check_exit_code(0)
assert r.response['status'] == 200
r.check_response(http_status=200)
assert r.json['server'] == env.domain1
# simple https: GET, any http version
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
def test_01_02_https_get(self, env: Env, httpd):
curl = CurlClient(env=env)
url = f'https://{env.domain1}:{env.https_port}/data.json'
r = curl.http_get(url=url)
r.check_exit_code(0)
assert r.response['status'] == 200
r.check_response(http_status=200)
assert r.json['server'] == env.domain1
# simple https: GET, h2 wanted and got
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
def test_01_03_h2_get(self, env: Env, httpd):
curl = CurlClient(env=env)
url = f'https://{env.domain1}:{env.https_port}/data.json'
r = curl.http_get(url=url, extra_args=['--http2'])
r.check_exit_code(0)
assert r.response['status'] == 200
assert r.response['protocol'] == 'HTTP/2'
r.check_response(http_status=200, protocol='HTTP/2')
assert r.json['server'] == env.domain1
# simple https: GET, h2 unsupported, fallback to h1
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
def test_01_04_h2_unsupported(self, env: Env, httpd):
curl = CurlClient(env=env)
url = f'https://{env.domain2}:{env.https_port}/data.json'
r = curl.http_get(url=url, extra_args=['--http2'])
r.check_exit_code(0)
assert r.response['status'] == 200
assert r.response['protocol'] == 'HTTP/1.1'
r.check_response(http_status=200, protocol='HTTP/1.1')
assert r.json['server'] == env.domain2
# simple h3: GET, want h3 and get it
@ -85,7 +82,5 @@ class TestBasic:
curl = CurlClient(env=env)
url = f'https://{env.domain1}:{env.h3_port}/data.json'
r = curl.http_get(url=url, extra_args=['--http3'])
r.check_exit_code(0)
assert r.response['status'] == 200
assert r.response['protocol'] == 'HTTP/3'
r.check_response(http_status=200, protocol='HTTP/3')
assert r.json['server'] == env.domain1

View File

@ -59,8 +59,7 @@ class TestDownload:
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/data.json'
r = curl.http_download(urls=[url], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(http_status=200)
# download 2 files
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
@ -70,8 +69,7 @@ class TestDownload:
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-1]'
r = curl.http_download(urls=[url], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=2, exp_status=200)
r.check_response(http_status=200, count=2)
# download 100 files sequentially
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
@ -82,10 +80,7 @@ class TestDownload:
curl = CurlClient(env=env)
urln = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-99]'
r = curl.http_download(urls=[urln], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=100, exp_status=200)
# http/1.1 sequential transfers will open 1 connection
assert r.total_connects == 1
r.check_response(http_status=200, count=100, connect_count=1)
# download 100 files parallel
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
@ -99,14 +94,13 @@ class TestDownload:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--parallel', '--parallel-max', f'{max_parallel}'
])
r.check_exit_code(0)
r.check_stats(count=100, exp_status=200)
r.check_response(http_status=200, count=100)
if proto == 'http/1.1':
# http/1.1 parallel transfers will open multiple connections
assert r.total_connects > 1
assert r.total_connects > 1, r.dump_logs()
else:
# http2 parallel transfers will use one connection (common limit is 100)
assert r.total_connects == 1
assert r.total_connects == 1, r.dump_logs()
# download 500 files sequential
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
@ -119,14 +113,13 @@ class TestDownload:
curl = CurlClient(env=env)
urln = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-499]'
r = curl.http_download(urls=[urln], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=500, exp_status=200)
r.check_response(http_status=200, count=500)
if proto == 'http/1.1':
# http/1.1 parallel transfers will open multiple connections
assert r.total_connects > 1
assert r.total_connects > 1, r.dump_logs()
else:
# http2 parallel transfers will use one connection (common limit is 100)
assert r.total_connects == 1
assert r.total_connects == 1, r.dump_logs()
# download 500 files parallel
@pytest.mark.parametrize("proto", ['h2', 'h3'])
@ -141,10 +134,7 @@ class TestDownload:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--parallel', '--parallel-max', f'{max_parallel}'
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
# http2 parallel transfers will use one connection (common limit is 100)
assert r.total_connects == 1
r.check_response(http_status=200, count=count, connect_count=1)
# download files parallel, check connection reuse/multiplex
@pytest.mark.parametrize("proto", ['h2', 'h3'])
@ -159,8 +149,7 @@ class TestDownload:
with_stats=True, extra_args=[
'--parallel', '--parallel-max', '200'
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(http_status=200, count=count)
# should have used at most 2 connections only (test servers allow 100 req/conn)
# it may be just 1 on slow systems where request are answered faster than
# curl can exhaust the capacity or if curl runs with address-sanitizer speed
@ -177,8 +166,7 @@ class TestDownload:
with_stats=True, extra_args=[
'--parallel'
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
# http/1.1 should have used count connections
assert r.total_connects == count, "http/1.1 should use this many connections"
@ -191,8 +179,7 @@ class TestDownload:
urln = f'https://{env.authority_for(env.domain1, proto)}/data-1m?[0-{count-1}]'
curl = CurlClient(env=env)
r = curl.http_download(urls=[urln], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_02_09_1MB_parallel(self, env: Env,
@ -205,8 +192,7 @@ class TestDownload:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--parallel'
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_02_10_10MB_serial(self, env: Env,
@ -217,8 +203,7 @@ class TestDownload:
urln = f'https://{env.authority_for(env.domain1, proto)}/data-10m?[0-{count-1}]'
curl = CurlClient(env=env)
r = curl.http_download(urls=[urln], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_02_11_10MB_parallel(self, env: Env,
@ -233,8 +218,7 @@ class TestDownload:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--parallel'
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
@pytest.mark.parametrize("proto", ['h2', 'h3'])
def test_02_12_head_serial_https(self, env: Env,
@ -247,8 +231,7 @@ class TestDownload:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--head'
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
@pytest.mark.parametrize("proto", ['h2'])
def test_02_13_head_serial_h2c(self, env: Env,
@ -261,8 +244,7 @@ class TestDownload:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--head', '--http2-prior-knowledge', '--fail-early'
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
def test_02_20_h2_small_frames(self, env: Env, httpd, repeat):
# Test case to reproduce content corruption as observed in
@ -288,8 +270,7 @@ class TestDownload:
r = curl.http_download(urls=[urln], alpn_proto="h2", extra_args=[
'--parallel', '--parallel-max', '2'
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
srcfile = os.path.join(httpd.docs_dir, 'data-1m')
for i in range(count):
dfile = curl.download_file(i)

View File

@ -66,8 +66,7 @@ class TestGoAway:
assert httpd.reload()
t.join()
r: ExecResult = self.r
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
# reload will shut down the connection gracefully with GOAWAY
# we expect to see a second connection opened afterwards
assert r.total_connects == 2
@ -101,16 +100,14 @@ class TestGoAway:
assert nghttpx.reload(timeout=timedelta(seconds=2))
t.join()
r: ExecResult = self.r
r.check_exit_code(0)
# this should take `count` seconds to retrieve
assert r.duration >= timedelta(seconds=count)
r.check_response(count=count, http_status=200, connect_count=2)
# reload will shut down the connection gracefully with GOAWAY
# we expect to see a second connection opened afterwards
assert r.total_connects == 2
for idx, s in enumerate(r.stats):
if s['num_connects'] > 0:
log.debug(f'request {idx} connected')
# this should take `count` seconds to retrieve
assert r.duration >= timedelta(seconds=count)
r.check_stats(count=count, exp_status=200, exp_exitcode=0)
# download files sequentially with delay, reload server for GOAWAY
def test_03_03_h1_goaway(self, env: Env, httpd, nghttpx, repeat):
@ -133,11 +130,9 @@ class TestGoAway:
assert httpd.reload()
t.join()
r: ExecResult = self.r
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200, connect_count=2)
# reload will shut down the connection gracefully with GOAWAY
# we expect to see a second connection opened afterwards
assert r.total_connects == 2
for idx, s in enumerate(r.stats):
if s['num_connects'] > 0:
log.debug(f'request {idx} connected')

View File

@ -55,8 +55,7 @@ class TestStuttered:
f'/curltest/tweak?id=[0-{count - 1}]'\
'&chunks=100&chunk_size=100&chunk_delay=10ms'
r = curl.http_download(urls=[urln], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
# download 50 files in 100 chunks a 100 bytes with 10ms delay between
# prepend 100 file requests to warm up connection processing limits
@ -75,8 +74,7 @@ class TestStuttered:
'&chunks=100&chunk_size=100&chunk_delay=10ms'
r = curl.http_download(urls=[url1, urln], alpn_proto=proto,
extra_args=['--parallel'])
r.check_exit_code(0)
r.check_stats(count=warmups+count, exp_status=200)
r.check_response(count=warmups+count, http_status=200)
assert r.total_connects == 1
t_avg, i_min, t_min, i_max, t_max = self.stats_spread(r.stats[warmups:], 'time_total')
if t_max < (5 * t_min) and t_min < 2:
@ -98,8 +96,7 @@ class TestStuttered:
'&chunks=1000&chunk_size=10&chunk_delay=100us'
r = curl.http_download(urls=[url1, urln], alpn_proto=proto,
extra_args=['--parallel'])
r.check_exit_code(0)
r.check_stats(count=warmups+count, exp_status=200)
r.check_response(count=warmups+count, http_status=200)
assert r.total_connects == 1
t_avg, i_min, t_min, i_max, t_max = self.stats_spread(r.stats[warmups:], 'time_total')
if t_max < (5 * t_min):
@ -121,8 +118,7 @@ class TestStuttered:
'&chunks=10000&chunk_size=1&chunk_delay=50us'
r = curl.http_download(urls=[url1, urln], alpn_proto=proto,
extra_args=['--parallel'])
r.check_exit_code(0)
r.check_stats(count=warmups+count, exp_status=200)
r.check_response(count=warmups+count, http_status=200)
assert r.total_connects == 1
t_avg, i_min, t_min, i_max, t_max = self.stats_spread(r.stats[warmups:], 'time_total')
if t_max < (5 * t_min):

View File

@ -62,7 +62,7 @@ class TestErrors:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--retry', '0'
])
r.check_exit_code_not(0)
r.check_exit_code(False)
invalid_stats = []
for idx, s in enumerate(r.stats):
if 'exitcode' not in s or s['exitcode'] not in [18, 56, 92]:
@ -85,7 +85,7 @@ class TestErrors:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--retry', '0', '--parallel',
])
r.check_exit_code_not(0)
r.check_exit_code(False)
assert len(r.stats) == count, f'did not get all stats: {r}'
invalid_stats = []
for idx, s in enumerate(r.stats):

View File

@ -50,8 +50,7 @@ class TestEyeballs:
curl = CurlClient(env=env)
urln = f'https://{env.authority_for(env.domain1, "h3")}/data.json'
r = curl.http_download(urls=[urln], extra_args=['--http3-only'])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
assert r.stats[0]['http_version'] == '3'
# download using only HTTP/3 on missing server
@ -61,7 +60,7 @@ class TestEyeballs:
curl = CurlClient(env=env)
urln = f'https://{env.authority_for(env.domain1, "h3")}/data.json'
r = curl.http_download(urls=[urln], extra_args=['--http3-only'])
r.check_exit_code(7)
r.check_response(exitcode=7, http_status=None)
# download using HTTP/3 on missing server with fallback on h2
@pytest.mark.skipif(condition=not Env.have_h3(), reason=f"missing HTTP/3 support")
@ -70,8 +69,7 @@ class TestEyeballs:
curl = CurlClient(env=env)
urln = f'https://{env.authority_for(env.domain1, "h3")}/data.json'
r = curl.http_download(urls=[urln], extra_args=['--http3'])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
assert r.stats[0]['http_version'] == '2'
# download using HTTP/3 on missing server with fallback on http/1.1
@ -81,8 +79,7 @@ class TestEyeballs:
curl = CurlClient(env=env)
urln = f'https://{env.authority_for(env.domain2, "h3")}/data.json'
r = curl.http_download(urls=[urln], extra_args=['--http3'])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
assert r.stats[0]['http_version'] == '1.1'
# make a successful https: transfer and observer the timer stats
@ -90,8 +87,7 @@ class TestEyeballs:
curl = CurlClient(env=env)
urln = f'https://{env.authority_for(env.domain1, "h2")}/data.json'
r = curl.http_download(urls=[urln])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
assert r.stats[0]['time_connect'] > 0.0
assert r.stats[0]['time_appconnect'] > 0.0
@ -102,8 +98,7 @@ class TestEyeballs:
r = curl.http_download(urls=[urln], extra_args=[
'--resolve', f'not-valid.com:{env.https_port}:127.0.0.1'
])
r.check_exit_code_not(0)
r.check_stats(count=1, exp_status=0)
r.check_response(count=1, http_status=0, exitcode=False)
assert r.stats[0]['time_connect'] > 0.0 # was tcp connected
assert r.stats[0]['time_appconnect'] == 0 # but not SSL verified
@ -114,8 +109,7 @@ class TestEyeballs:
r = curl.http_download(urls=[urln], extra_args=[
'--resolve', f'not-valid.com:{1}:127.0.0.1'
])
r.check_exit_code_not(0)
r.check_stats(count=1, exp_status=0)
r.check_response(count=1, http_status=None, exitcode=False)
assert r.stats[0]['time_connect'] == 0 # no one should have listened
assert r.stats[0]['time_appconnect'] == 0 # did not happen either

View File

@ -58,8 +58,7 @@ class TestUpload:
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
r = curl.http_upload(urls=[url], data=data, alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
respdata = open(curl.response_file(0)).readlines()
assert respdata == [data]
@ -74,8 +73,7 @@ class TestUpload:
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
indata = open(fdata).readlines()
respdata = open(curl.response_file(0)).readlines()
assert respdata == indata
@ -92,8 +90,7 @@ class TestUpload:
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
r = curl.http_upload(urls=[url], data=data, alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
for i in range(count):
respdata = open(curl.response_file(i)).readlines()
assert respdata == [data]
@ -112,8 +109,7 @@ class TestUpload:
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
r = curl.http_upload(urls=[url], data=data, alpn_proto=proto,
extra_args=['--parallel'])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
for i in range(count):
respdata = open(curl.response_file(i)).readlines()
assert respdata == [data]
@ -130,10 +126,9 @@ class TestUpload:
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
indata = open(fdata).readlines()
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
for i in range(count):
respdata = open(curl.response_file(i)).readlines()
assert respdata == indata
@ -150,10 +145,8 @@ class TestUpload:
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
indata = open(fdata).readlines()
r.check_stats(count=count, exp_status=200)
for i in range(count):
respdata = open(curl.response_file(i)).readlines()
assert respdata == indata
@ -172,8 +165,7 @@ class TestUpload:
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
r = curl.http_upload(urls=[url], data=data, alpn_proto=proto,
extra_args=['--parallel'])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
for i in range(count):
respdata = open(curl.response_file(i)).readlines()
assert respdata == [data]
@ -192,8 +184,7 @@ class TestUpload:
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-{count-1}]'
r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto,
extra_args=['--parallel'])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
self.check_download(count, fdata, curl)
# PUT 100k
@ -209,10 +200,9 @@ class TestUpload:
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-{count-1}]'
r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto,
extra_args=['--parallel'])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
exp_data = [f'{os.path.getsize(fdata)}']
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
for i in range(count):
respdata = open(curl.response_file(i)).readlines()
assert respdata == exp_data
@ -230,10 +220,9 @@ class TestUpload:
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-{count-1}]&chunk_delay=10ms'
r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto,
extra_args=['--parallel'])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
exp_data = [f'{os.path.getsize(fdata)}']
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
for i in range(count):
respdata = open(curl.response_file(i)).readlines()
assert respdata == exp_data

View File

@ -35,6 +35,7 @@ log = logging.getLogger(__name__)
@pytest.mark.skipif(condition=not Env.has_caddy(), reason=f"missing caddy")
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
class TestCaddy:
@pytest.fixture(autouse=True, scope='class')
@ -71,8 +72,7 @@ class TestCaddy:
curl = CurlClient(env=env)
url = f'https://{env.domain1}:{caddy.port}/data.json'
r = curl.http_download(urls=[url], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
# download 1MB files sequentially
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
@ -86,10 +86,7 @@ class TestCaddy:
curl = CurlClient(env=env)
urln = f'https://{env.domain1}:{caddy.port}/data1.data?[0-{count-1}]'
r = curl.http_download(urls=[urln], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
# sequential transfers will open 1 connection
assert r.total_connects == 1
r.check_response(count=count, http_status=200, connect_count=1)
# download 1MB files parallel
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
@ -105,13 +102,12 @@ class TestCaddy:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--parallel'
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
if proto == 'http/1.1':
# http/1.1 parallel transfers will open multiple connections
assert r.total_connects > 1
assert r.total_connects > 1, r.dump_logs()
else:
assert r.total_connects == 1
assert r.total_connects == 1, r.dump_logs()
# download 5MB files sequentially
@pytest.mark.parametrize("proto", ['h2', 'h3'])
@ -125,10 +121,7 @@ class TestCaddy:
curl = CurlClient(env=env)
urln = f'https://{env.domain1}:{caddy.port}/data5.data?[0-{count-1}]'
r = curl.http_download(urls=[urln], alpn_proto=proto)
assert r.exit_code == 0
r.check_stats(count=count, exp_status=200)
# sequential transfers will open 1 connection
assert r.total_connects == 1
r.check_response(count=count, http_status=200, connect_count=1)
# download 10MB files sequentially
@pytest.mark.parametrize("proto", ['h2', 'h3'])
@ -142,10 +135,7 @@ class TestCaddy:
curl = CurlClient(env=env)
urln = f'https://{env.domain1}:{caddy.port}/data10.data?[0-{count-1}]'
r = curl.http_download(urls=[urln], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
# sequential transfers will open 1 connection
assert r.total_connects == 1
r.check_response(count=count, http_status=200, connect_count=1)
# download 10MB files parallel
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
@ -161,11 +151,10 @@ class TestCaddy:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--parallel'
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
if proto == 'http/1.1':
# http/1.1 parallel transfers will open multiple connections
assert r.total_connects > 1
assert r.total_connects > 1, r.dump_logs()
else:
assert r.total_connects == 1
assert r.total_connects == 1, r.dump_logs()

View File

@ -24,11 +24,13 @@
#
###########################################################################
#
import filecmp
import logging
import os
import re
import pytest
from testenv import Env, CurlClient
from testenv import Env, CurlClient, ExecResult
log = logging.getLogger(__name__)
@ -37,13 +39,33 @@ log = logging.getLogger(__name__)
class TestProxy:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env, httpd):
def _class_scope(self, env, httpd, nghttpx_fwd):
push_dir = os.path.join(httpd.docs_dir, 'push')
if not os.path.exists(push_dir):
os.makedirs(push_dir)
if env.have_nghttpx():
nghttpx_fwd.start_if_needed()
env.make_data_file(indir=env.gen_dir, fname="data-100k", fsize=100*1024)
env.make_data_file(indir=env.gen_dir, fname="data-10m", fsize=10*1024*1024)
httpd.clear_extra_configs()
httpd.reload()
def set_tunnel_proto(self, proto):
if proto == 'h2':
os.environ['CURL_PROXY_TUNNEL_H2'] = '1'
return 'HTTP/2'
else:
os.environ.pop('CURL_PROXY_TUNNEL_H2', None)
return 'HTTP/1.1'
def get_tunnel_proto_used(self, r: ExecResult):
for l in r.trace_lines:
m = re.match(r'.* CONNECT tunnel: (\S+) negotiated$', l)
if m:
return m.group(1)
assert False, f'tunnel protocol not found in:\n{"".join(r.trace_lines)}'
return None
# download via http: proxy (no tunnel)
def test_10_01_proxy_http(self, env: Env, httpd, repeat):
curl = CurlClient(env=env)
@ -53,13 +75,13 @@ class TestProxy:
'--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
# download via https: proxy (no tunnel)
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
reason='curl lacks HTTPS-proxy support')
def test_10_02_proxy_https(self, env: Env, httpd, repeat):
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
def test_10_02_proxy_https(self, env: Env, httpd, nghttpx_fwd, repeat):
curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
@ -68,8 +90,7 @@ class TestProxy:
'--resolve', f'{env.proxy_domain}:{env.proxys_port}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
# download http: via http: proxytunnel
def test_10_03_proxytunnel_http(self, env: Env, httpd, repeat):
@ -81,27 +102,27 @@ class TestProxy:
'--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
# download http: via https: proxytunnel
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
reason='curl lacks HTTPS-proxy support')
def test_10_04_proxy_https(self, env: Env, httpd, repeat):
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
def test_10_04_proxy_https(self, env: Env, httpd, nghttpx_fwd, repeat):
curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
extra_args=[
'--proxytunnel',
'--proxy', f'https://{env.proxy_domain}:{env.proxys_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxys_port}:127.0.0.1',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port()}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port()}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
# download https: with proto via http: proxytunnel
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
def test_10_05_proxytunnel_http(self, env: Env, httpd, proto, repeat):
curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/data.json'
@ -112,28 +133,129 @@ class TestProxy:
'--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
exp_proto = 'HTTP/2' if proto == 'h2' else 'HTTP/1.1'
assert r.response['protocol'] == exp_proto
r.check_response(count=1, http_status=200,
protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
# download https: with proto via https: proxytunnel
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
reason='curl lacks HTTPS-proxy support')
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
def test_10_06_proxy_https(self, env: Env, httpd, proto, repeat):
@pytest.mark.parametrize("tunnel", ['http/1.1', 'h2'])
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
def test_10_06_proxytunnel_https(self, env: Env, httpd, nghttpx_fwd, proto, tunnel, repeat):
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2')
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/data.json'
url = f'https://localhost:{env.https_port}/data.json?[0-0]'
r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
with_headers=True,
extra_args=[
'--proxytunnel',
'--proxy', f'https://{env.proxy_domain}:{env.proxys_port}/',
'--resolve', f'{env.proxy_domain}:{env.proxys_port}:127.0.0.1',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
exp_proto = 'HTTP/2' if proto == 'h2' else 'HTTP/1.1'
assert r.response['protocol'] == exp_proto
r.check_response(count=1, http_status=200,
protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
assert self.get_tunnel_proto_used(r) == exp_tunnel_proto
srcfile = os.path.join(httpd.docs_dir, 'data.json')
dfile = curl.download_file(0)
assert filecmp.cmp(srcfile, dfile, shallow=False)
# download many https: with proto via https: proxytunnel
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
@pytest.mark.parametrize("tunnel", ['http/1.1', 'h2'])
@pytest.mark.parametrize("fname, fcount", [
['data.json', 100],
['data-100k', 20],
['data-1m', 5]
])
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
def test_10_07_pts_down_small(self, env: Env, httpd, nghttpx_fwd, proto,
tunnel, fname, fcount, repeat):
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2')
count = fcount
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/{fname}?[0-{count-1}]'
r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
with_headers=True,
extra_args=[
'--proxytunnel',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_response(count=count, http_status=200,
protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
assert self.get_tunnel_proto_used(r) == exp_tunnel_proto
srcfile = os.path.join(httpd.docs_dir, fname)
for i in range(count):
dfile = curl.download_file(i)
assert filecmp.cmp(srcfile, dfile, shallow=False)
# upload many https: with proto via https: proxytunnel
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
@pytest.mark.parametrize("tunnel", ['http/1.1', 'h2'])
@pytest.mark.parametrize("fname, fcount", [
['data.json', 50],
['data-100k', 20],
['data-1m', 5]
])
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
def test_10_08_upload_seq_large(self, env: Env, httpd, nghttpx, proto,
tunnel, fname, fcount, repeat):
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2')
count = fcount
srcfile = os.path.join(httpd.docs_dir, fname)
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/curltest/echo?id=[0-{count-1}]'
r = curl.http_upload(urls=[url], data=f'@{srcfile}', alpn_proto=proto,
extra_args=[
'--proxytunnel',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
assert self.get_tunnel_proto_used(r) == exp_tunnel_proto
r.check_response(count=count, http_status=200)
indata = open(srcfile).readlines()
r.check_response(count=count, http_status=200)
for i in range(count):
respdata = open(curl.response_file(i)).readlines()
assert respdata == indata
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
@pytest.mark.parametrize("tunnel", ['http/1.1', 'h2'])
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
def test_10_09_reuse_ser(self, env: Env, httpd, nghttpx_fwd, tunnel, repeat):
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2')
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env)
url1 = f'https://localhost:{env.https_port}/data.json'
url2 = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url1, url2], alpn_proto='http/1.1', with_stats=True,
with_headers=True,
extra_args=[
'--proxytunnel',
'--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
'--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
'--proxy-cacert', env.ca.cert_file,
])
r.check_response(count=2, http_status=200)
assert self.get_tunnel_proto_used(r) == exp_tunnel_proto
if tunnel == 'h2':
# TODO: we would like to reuse the first connection for the
# second URL, but this is currently not possible
# assert r.total_connects == 1
assert r.total_connects == 2
else:
assert r.total_connects == 2

View File

@ -101,10 +101,10 @@ class TestUnix:
extra_args=[
'--unix-socket', uds_faker.path,
])
r.check_exit_code(0)
r.check_stats(count=1, exp_status=200)
r.check_response(count=1, http_status=200)
# download https: via unix socket
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
def test_11_02_unix_connect_http(self, env: Env, httpd, uds_faker, repeat):
curl = CurlClient(env=env)
url = f'https://{env.domain1}:{env.https_port}/data.json'
@ -112,7 +112,7 @@ class TestUnix:
extra_args=[
'--unix-socket', uds_faker.path,
])
r.check_exit_code(35)
r.check_response(exitcode=35, http_status=None)
# download HTTP/3 via unix socket
@pytest.mark.skipif(condition=not Env.have_h3(), reason='h3 not supported')
@ -124,4 +124,4 @@ class TestUnix:
extra_args=[
'--unix-socket', uds_faker.path,
])
r.check_exit_code(96)
r.check_response(exitcode=96, http_status=None)

View File

@ -37,6 +37,7 @@ log = logging.getLogger(__name__)
@pytest.mark.skipif(condition=Env.curl_uses_lib('bearssl'), reason='BearSSL too slow')
@pytest.mark.skipif(condition=not Env.have_ssl_curl(), reason=f"curl without SSL")
class TestReuse:
# check if HTTP/1.1 handles 'Connection: close' correctly
@ -52,8 +53,7 @@ class TestReuse:
curl = CurlClient(env=env)
urln = f'https://{env.authority_for(env.domain1, proto)}/data.json?[0-{count-1}]'
r = curl.http_download(urls=[urln], alpn_proto=proto)
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
# Server sends `Connection: close` on every 2nd request, requiring
# a new connection
assert r.total_connects == count/2
@ -72,8 +72,7 @@ class TestReuse:
r = curl.http_download(urls=[urln], alpn_proto=proto, extra_args=[
'--rate', '30/m',
])
r.check_exit_code(0)
r.check_stats(count=count, exp_status=200)
r.check_response(count=count, http_status=200)
# Connections time out on server before we send another request,
assert r.total_connects == count
# we do not see how often a request was retried in the stats, so

View File

@ -0,0 +1,193 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#***************************************************************************
# _ _ ____ _
# Project ___| | | | _ \| |
# / __| | | | |_) | |
# | (__| |_| | _ <| |___
# \___|\___/|_| \_\_____|
#
# Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://curl.se/docs/copyright.html.
#
# You may opt to use, copy, modify, merge, publish, distribute and/or sell
# copies of the Software, and permit persons to whom the Software is
# furnished to do so, under the terms of the COPYING file.
#
# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
# KIND, either express or implied.
#
# SPDX-License-Identifier: curl
#
###########################################################################
#
import filecmp
import logging
import os
import re
import time
import pytest
from testenv import Env, CurlClient
log = logging.getLogger(__name__)
# Tests for proxy authentication (Basic) against the Apache forward proxy
# and, for CONNECT tunnels, also through the nghttpx HTTP/2 forward proxy.
@pytest.mark.skipif(condition=Env.setup_incomplete(),
                    reason=f"missing: {Env.incomplete_reason()}")
class TestProxyAuth:
@pytest.fixture(autouse=True, scope='class')
def _class_scope(self, env, httpd, nghttpx_fwd):
# start the h2 forward proxy (if nghttpx is available) and switch
# httpd into proxy-auth mode for the duration of this test class
if env.have_nghttpx():
nghttpx_fwd.start_if_needed()
httpd.clear_extra_configs()
httpd.set_proxy_auth(True)
httpd.reload()
yield
# restore httpd to unauthenticated proxying afterwards
httpd.set_proxy_auth(False)
httpd.reload()
# select the CONNECT tunnel protocol via the debug-only env variable
# CURL_PROXY_TUNNEL_H2; returns the protocol name expected in the trace
def set_tunnel_proto(self, proto):
if proto == 'h2':
os.environ['CURL_PROXY_TUNNEL_H2'] = '1'
return 'HTTP/2'
else:
os.environ.pop('CURL_PROXY_TUNNEL_H2', None)
return 'HTTP/1.1'
# scan curl's trace file for the negotiated CONNECT tunnel protocol;
# returns None when no tunnel negotiation was logged
def get_tunnel_proto_used(self, curl: CurlClient):
assert os.path.exists(curl.trace_file)
for l in open(curl.trace_file).readlines():
m = re.match(r'.* == Info: CONNECT tunnel: (\S+) negotiated', l)
if m:
return m.group(1)
return None
# download via http: proxy (no tunnel), no auth
def test_13_01_proxy_no_auth(self, env: Env, httpd, repeat):
curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
                       extra_args=[
                         '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
                         '--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
                       ])
# proxy demands credentials: 407 Proxy Authentication Required
r.check_response(count=1, http_status=407)
# download via http: proxy (no tunnel), auth
def test_13_02_proxy_auth(self, env: Env, httpd, repeat):
curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
                       extra_args=[
                         '--proxy-user', 'proxy:proxy',
                         '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
                         '--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
                       ])
r.check_response(count=1, http_status=200)
# download via https: proxy (no tunnel), no auth
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
                    reason='curl lacks HTTPS-proxy support')
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
def test_13_03_proxys_no_auth(self, env: Env, httpd, nghttpx_fwd, repeat):
curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
                       extra_args=[
                         '--proxy', f'https://{env.proxy_domain}:{env.pts_port()}/',
                         '--resolve', f'{env.proxy_domain}:{env.pts_port()}:127.0.0.1',
                         '--proxy-cacert', env.ca.cert_file,
                       ])
r.check_response(count=1, http_status=407)
# download via https: proxy (no tunnel), auth
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
                    reason='curl lacks HTTPS-proxy support')
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
def test_13_04_proxys_auth(self, env: Env, httpd, nghttpx_fwd, repeat):
curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
                       extra_args=[
                         '--proxy-user', 'proxy:proxy',
                         '--proxy', f'https://{env.proxy_domain}:{env.pts_port()}/',
                         '--resolve', f'{env.proxy_domain}:{env.pts_port()}:127.0.0.1',
                         '--proxy-cacert', env.ca.cert_file,
                       ])
r.check_response(count=1, http_status=200)
# CONNECT tunnel via http: proxy, no auth supplied
def test_13_05_tunnel_http_no_auth(self, env: Env, httpd, repeat):
curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
                       extra_args=[
                         '--proxytunnel',
                         '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
                         '--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
                       ])
# expect "COULD_NOT_CONNECT"
r.check_response(exitcode=56, http_status=None)
# CONNECT tunnel via http: proxy, with auth
def test_13_06_tunnel_http_auth(self, env: Env, httpd, repeat):
curl = CurlClient(env=env)
url = f'http://localhost:{env.http_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto='http/1.1', with_stats=True,
                       extra_args=[
                         '--proxytunnel',
                         '--proxy-user', 'proxy:proxy',
                         '--proxy', f'http://{env.proxy_domain}:{env.proxy_port}/',
                         '--resolve', f'{env.proxy_domain}:{env.proxy_port}:127.0.0.1',
                       ])
r.check_response(count=1, http_status=200)
# CONNECT tunnel (http/1.1 and h2) via https: proxy, no auth supplied
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
                    reason='curl lacks HTTPS-proxy support')
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
@pytest.mark.parametrize("tunnel", ['http/1.1', 'h2'])
def test_13_07_tunnels_no_auth(self, env: Env, httpd, proto, tunnel, repeat):
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2')
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
                       with_headers=True, with_trace=True,
                       extra_args=[
                         '--proxytunnel',
                         '--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
                         '--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
                         '--proxy-cacert', env.ca.cert_file,
                       ])
# expect "COULD_NOT_CONNECT"
r.check_response(exitcode=56, http_status=None)
assert self.get_tunnel_proto_used(curl) == exp_tunnel_proto
# CONNECT tunnel (http/1.1 and h2) via https: proxy, with auth
@pytest.mark.skipif(condition=not Env.have_nghttpx(), reason="no nghttpx available")
@pytest.mark.skipif(condition=not Env.curl_has_feature('HTTPS-proxy'),
                    reason='curl lacks HTTPS-proxy support')
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
@pytest.mark.parametrize("tunnel", ['http/1.1', 'h2'])
def test_13_08_tunnels_auth(self, env: Env, httpd, proto, tunnel, repeat):
if tunnel == 'h2' and not env.curl_uses_lib('nghttp2'):
pytest.skip('only supported with nghttp2')
exp_tunnel_proto = self.set_tunnel_proto(tunnel)
curl = CurlClient(env=env)
url = f'https://localhost:{env.https_port}/data.json'
r = curl.http_download(urls=[url], alpn_proto=proto, with_stats=True,
                       with_headers=True, with_trace=True,
                       extra_args=[
                         '--proxytunnel',
                         '--proxy-user', 'proxy:proxy',
                         '--proxy', f'https://{env.proxy_domain}:{env.pts_port(tunnel)}/',
                         '--resolve', f'{env.proxy_domain}:{env.pts_port(tunnel)}:127.0.0.1',
                         '--proxy-cacert', env.ca.cert_file,
                       ])
r.check_response(count=1, http_status=200,
                 protocol='HTTP/2' if proto == 'h2' else 'HTTP/1.1')
assert self.get_tunnel_proto_used(curl) == exp_tunnel_proto

View File

@ -24,9 +24,14 @@
#
###########################################################################
#
import pytest
pytest.register_assert_rewrite("testenv.env", "testenv.curl", "testenv.caddy",
"testenv.httpd", "testenv.nghttpx")
from .env import Env
from .certs import TestCA, Credentials
from .caddy import Caddy
from .httpd import Httpd
from .curl import CurlClient, ExecResult
from .nghttpx import Nghttpx
from .nghttpx import Nghttpx, NghttpxQuic, NghttpxFwd

View File

@ -24,6 +24,7 @@
#
###########################################################################
#
import pytest
import json
import logging
import os
@ -31,7 +32,7 @@ import re
import shutil
import subprocess
from datetime import timedelta, datetime
from typing import List, Optional, Dict
from typing import List, Optional, Dict, Union
from urllib.parse import urlparse
from .env import Env
@ -110,6 +111,10 @@ class ExecResult:
def stderr(self) -> str:
return ''.join(self._stderr)
@property
def trace_lines(self) -> List[str]:
return self._trace if self._trace else self._stderr
@property
def duration(self) -> timedelta:
return self._duration
@ -159,53 +164,97 @@ class ExecResult:
def add_assets(self, assets: List):
self._assets.extend(assets)
def check_exit_code(self, code: int):
assert self.exit_code == code, \
f'expected exit code {code}, '\
f'got {self.exit_code}\n{self._dump_logs()}'
def check_exit_code(self, code: Union[int, bool]):
if code is True:
assert self.exit_code == 0, f'expected exit code {code}, '\
f'got {self.exit_code}\n{self.dump_logs()}'
elif code is False:
assert self.exit_code != 0, f'expected exit code {code}, '\
f'got {self.exit_code}\n{self.dump_logs()}'
else:
assert self.exit_code == code, f'expected exit code {code}, '\
f'got {self.exit_code}\n{self.dump_logs()}'
def check_exit_code_not(self, code: int):
assert self.exit_code != code, \
f'expected exit code other than {code}\n{self._dump_logs()}'
def check_response(self, http_status: Optional[int] = 200,
count: Optional[int] = 1,
protocol: Optional[str] = None,
exitcode: Optional[int] = 0,
connect_count: Optional[int] = None):
if exitcode:
self.check_exit_code(exitcode)
if self.with_stats and isinstance(exitcode, int):
for idx, x in enumerate(self.stats):
if 'exitcode' in x:
assert int(x['exitcode']) == exitcode, \
f'response #{idx} exitcode: expected {exitcode}, '\
f'got {x["exitcode"]}\n{self.dump_logs()}'
def check_responses(self, count: int, exp_status: Optional[int] = None,
exp_exitcode: Optional[int] = None):
assert len(self.responses) == count, \
f'response count: expected {count}, ' \
f'got {len(self.responses)}\n{self._dump_logs()}'
if exp_status is not None:
for idx, x in enumerate(self.responses):
assert x['status'] == exp_status, \
f'response #{idx} status: expected {exp_status},'\
f'got {x["status"]}\n{self._dump_logs()}'
if exp_exitcode is not None:
for idx, x in enumerate(self.responses):
if 'exitcode' in x:
assert x['exitcode'] == 0, \
f'response #{idx} exitcode: expected {exp_exitcode}, '\
f'got {x["exitcode"]}\n{self._dump_logs()}'
if self.with_stats:
self.check_stats(count)
assert len(self.stats) == count, \
f'response count: expected {count}, ' \
f'got {len(self.stats)}\n{self.dump_logs()}'
else:
assert len(self.responses) == count, \
f'response count: expected {count}, ' \
f'got {len(self.responses)}\n{self.dump_logs()}'
if http_status is not None:
if self.with_stats:
for idx, x in enumerate(self.stats):
assert 'http_code' in x, \
f'response #{idx} reports no http_code\n{self.dump_logs()}'
assert x['http_code'] == http_status, \
f'response #{idx} http_code: expected {http_status}, '\
f'got {x["http_code"]}\n{self.dump_logs()}'
else:
for idx, x in enumerate(self.responses):
assert x['status'] == http_status, \
f'response #{idx} status: expected {http_status},'\
f'got {x["status"]}\n{self.dump_logs()}'
if protocol is not None:
if self.with_stats:
http_version = None
if protocol == 'HTTP/1.1':
http_version = '1.1'
elif protocol == 'HTTP/2':
http_version = '2'
elif protocol == 'HTTP/3':
http_version = '3'
if http_version is not None:
for idx, x in enumerate(self.stats):
assert x['http_version'] == http_version, \
f'response #{idx} protocol: expected http/{http_version},' \
f'got version {x["http_version"]}\n{self.dump_logs()}'
else:
for idx, x in enumerate(self.responses):
assert x['protocol'] == protocol, \
f'response #{idx} protocol: expected {protocol},'\
f'got {x["protocol"]}\n{self.dump_logs()}'
if connect_count is not None:
assert self.total_connects == connect_count, \
f'expected {connect_count}, but {self.total_connects} '\
f'were made\n{self.dump_logs()}'
def check_stats(self, count: int, exp_status: Optional[int] = None,
exp_exitcode: Optional[int] = None):
def check_stats(self, count: int, http_status: Optional[int] = None,
exitcode: Optional[int] = None):
if exitcode is None:
self.check_exit_code(0)
assert len(self.stats) == count, \
f'stats count: expected {count}, got {len(self.stats)}\n{self._dump_logs()}'
if exp_status is not None:
f'stats count: expected {count}, got {len(self.stats)}\n{self.dump_logs()}'
if http_status is not None:
for idx, x in enumerate(self.stats):
assert 'http_code' in x, \
f'status #{idx} reports no http_code\n{self._dump_logs()}'
assert x['http_code'] == exp_status, \
f'status #{idx} http_code: expected {exp_status}, '\
f'got {x["http_code"]}\n{self._dump_logs()}'
if exp_exitcode is not None:
f'status #{idx} reports no http_code\n{self.dump_logs()}'
assert x['http_code'] == http_status, \
f'status #{idx} http_code: expected {http_status}, '\
f'got {x["http_code"]}\n{self.dump_logs()}'
if exitcode is not None:
for idx, x in enumerate(self.stats):
if 'exitcode' in x:
assert x['exitcode'] == 0, \
f'status #{idx} exitcode: expected {exp_exitcode}, '\
f'got {x["exitcode"]}\n{self._dump_logs()}'
f'status #{idx} exitcode: expected {exitcode}, '\
f'got {x["exitcode"]}\n{self.dump_logs()}'
def _dump_logs(self):
def dump_logs(self):
lines = []
lines.append('>>--stdout ----------------------------------------------\n')
lines.extend(self._stdout)
@ -252,6 +301,10 @@ class CurlClient:
def download_file(self, i: int) -> str:
return os.path.join(self.run_dir, f'download_{i}.data')
@property
def trace_file(self) -> str:
return self._tracefile
def _rmf(self, path):
if os.path.exists(path):
return os.remove(path)
@ -272,6 +325,7 @@ class CurlClient:
with_stats: bool = True,
with_headers: bool = False,
no_save: bool = False,
with_trace: bool = False,
extra_args: List[str] = None):
if extra_args is None:
extra_args = []
@ -292,12 +346,14 @@ class CurlClient:
])
return self._raw(urls, alpn_proto=alpn_proto, options=extra_args,
with_stats=with_stats,
with_headers=with_headers)
with_headers=with_headers,
with_trace=with_trace)
def http_upload(self, urls: List[str], data: str,
alpn_proto: Optional[str] = None,
with_stats: bool = True,
with_headers: bool = False,
with_trace: bool = False,
extra_args: Optional[List[str]] = None):
if extra_args is None:
extra_args = []
@ -310,12 +366,14 @@ class CurlClient:
])
return self._raw(urls, alpn_proto=alpn_proto, options=extra_args,
with_stats=with_stats,
with_headers=with_headers)
with_headers=with_headers,
with_trace=with_trace)
def http_put(self, urls: List[str], data=None, fdata=None,
alpn_proto: Optional[str] = None,
with_stats: bool = True,
with_headers: bool = False,
with_trace: bool = False,
extra_args: Optional[List[str]] = None):
if extra_args is None:
extra_args = []
@ -333,7 +391,8 @@ class CurlClient:
return self._raw(urls, intext=data,
alpn_proto=alpn_proto, options=extra_args,
with_stats=with_stats,
with_headers=with_headers)
with_headers=with_headers,
with_trace=with_trace)
def response_file(self, idx: int):
return os.path.join(self._run_dir, f'download_{idx}.data')
@ -379,15 +438,16 @@ class CurlClient:
duration=datetime.now() - start,
with_stats=with_stats)
def _raw(self, urls, intext='', timeout=10, options=None, insecure=False,
def _raw(self, urls, intext='', timeout=None, options=None, insecure=False,
alpn_proto: Optional[str] = None,
force_resolve=True,
with_stats=False,
with_headers=True):
with_headers=True,
with_trace=False):
args = self._complete_args(
urls=urls, timeout=timeout, options=options, insecure=insecure,
alpn_proto=alpn_proto, force_resolve=force_resolve,
with_headers=with_headers)
with_headers=with_headers, with_trace=with_trace)
r = self._run(args, intext=intext, with_stats=with_stats)
if r.exit_code == 0 and with_headers:
self._parse_headerfile(self._headerfile, r=r)
@ -398,14 +458,15 @@ class CurlClient:
def _complete_args(self, urls, timeout=None, options=None,
insecure=False, force_resolve=True,
alpn_proto: Optional[str] = None,
with_headers: bool = True):
with_headers: bool = True,
with_trace: bool = False):
if not isinstance(urls, list):
urls = [urls]
args = [self._curl, "-s", "--path-as-is"]
if with_headers:
args.extend(["-D", self._headerfile])
if self.env.verbose > 2:
if with_trace or self.env.verbose > 2:
args.extend(['--trace', self._tracefile, '--trace-time'])
elif self.env.verbose > 1:
args.extend(['--trace', self._tracefile])

View File

@ -106,6 +106,7 @@ class EnvConfig:
'https': socket.SOCK_STREAM,
'proxy': socket.SOCK_STREAM,
'proxys': socket.SOCK_STREAM,
'h2proxys': socket.SOCK_STREAM,
'caddy': socket.SOCK_STREAM,
'caddys': socket.SOCK_STREAM,
})
@ -229,10 +230,18 @@ class Env:
def incomplete_reason() -> Optional[str]:
return Env.CONFIG.get_incomplete_reason()
@staticmethod
def have_nghttpx() -> bool:
return Env.CONFIG.nghttpx is not None
@staticmethod
def have_h3_server() -> bool:
return Env.CONFIG.nghttpx_with_h3
@staticmethod
def have_ssl_curl() -> bool:
return 'ssl' in Env.CONFIG.curl_props['features']
@staticmethod
def have_h2_curl() -> bool:
return 'http2' in Env.CONFIG.curl_props['features']
@ -371,13 +380,21 @@ class Env:
return self.https_port
@property
def proxy_port(self) -> str:
def proxy_port(self) -> int:
return self.CONFIG.ports['proxy']
@property
def proxys_port(self) -> str:
def proxys_port(self) -> int:
return self.CONFIG.ports['proxys']
@property
def h2proxys_port(self) -> int:
return self.CONFIG.ports['h2proxys']
def pts_port(self, proto: str = 'http/1.1') -> int:
# proxy tunnel port
return self.CONFIG.ports['h2proxys' if proto == 'h2' else 'proxys']
@property
def caddy(self) -> str:
return self.CONFIG.caddy

View File

@ -44,7 +44,9 @@ class Httpd:
MODULES = [
'log_config', 'logio', 'unixd', 'version', 'watchdog',
'authn_core', 'authz_user', 'authz_core', 'authz_host',
'authn_core', 'authn_file',
'authz_user', 'authz_core', 'authz_host',
'auth_basic', 'auth_digest',
'env', 'filter', 'headers', 'mime',
'rewrite', 'http2', 'ssl', 'proxy', 'proxy_http', 'proxy_connect',
'mpm_event',
@ -56,7 +58,7 @@ class Httpd:
MOD_CURLTEST = None
def __init__(self, env: Env):
def __init__(self, env: Env, proxy_auth: bool = False):
self.env = env
self._cmd = env.apachectl
self._apache_dir = os.path.join(env.gen_dir, 'apache')
@ -68,7 +70,9 @@ class Httpd:
self._logs_dir = os.path.join(self._apache_dir, 'logs')
self._error_log = os.path.join(self._logs_dir, 'error_log')
self._tmp_dir = os.path.join(self._apache_dir, 'tmp')
self._passwords = os.path.join(self._conf_dir, 'passwords')
self._mods_dir = None
self._proxy_auth = proxy_auth
self._extra_configs = {}
assert env.apxs
p = subprocess.run(args=[env.apxs, '-q', 'libexecdir'],
@ -103,6 +107,9 @@ class Httpd:
def clear_extra_configs(self):
self._extra_configs = {}
def set_proxy_auth(self, active: bool):
self._proxy_auth = active
def _run(self, args, intext=''):
env = {}
for key, val in os.environ.items():
@ -146,6 +153,7 @@ class Httpd:
r = self._apachectl('stop')
if r.exit_code == 0:
return self.wait_dead(timeout=timedelta(seconds=5))
log.fatal(f'stopping httpd failed: {r}')
return r.exit_code == 0
def restart(self):
@ -211,6 +219,9 @@ class Httpd:
'server': f'{domain2}',
}
fd.write(JSONEncoder().encode(data))
if self._proxy_auth:
with open(self._passwords, 'w') as fd:
fd.write('proxy:$apr1$FQfeInbs$WQZbODJlVg60j0ogEIlTW/\n')
with open(self._conf_file, 'w') as fd:
for m in self.MODULES:
if os.path.exists(os.path.join(self._mods_dir, f'mod_{m}.so')):
@ -223,9 +234,6 @@ class Httpd:
f'PidFile httpd.pid',
f'ErrorLog {self._error_log}',
f'LogLevel {self._get_log_level()}',
f'LogLevel http:trace4',
f'LogLevel proxy:trace4',
f'LogLevel proxy_http:trace4',
f'H2MinWorkers 16',
f'H2MaxWorkers 128',
f'H2Direct on',
@ -284,30 +292,33 @@ class Httpd:
conf.extend([ # http forward proxy
f'<VirtualHost *:{self.env.proxy_port}>',
f' ServerName {proxy_domain}',
f' Protocols h2c, http/1.1',
f' Protocols h2c http/1.1',
f' ProxyRequests On',
f' ProxyVia On',
f' AllowCONNECT {self.env.http_port} {self.env.https_port}',
f' <Proxy "*">',
f' Require ip 127.0.0.1',
f' </Proxy>',
])
conf.extend(self._get_proxy_conf())
conf.extend([
f'</VirtualHost>',
f'',
])
conf.extend([ # https forward proxy
f'<VirtualHost *:{self.env.proxys_port}>',
f' ServerName {proxy_domain}',
f' Protocols h2, http/1.1',
f' Protocols h2 http/1.1',
f' SSLEngine on',
f' SSLCertificateFile {proxy_creds.cert_file}',
f' SSLCertificateKeyFile {proxy_creds.pkey_file}',
f' ProxyRequests On',
f' ProxyVia On',
f' AllowCONNECT {self.env.http_port} {self.env.https_port}',
f' <Proxy "*">',
f' Require ip 127.0.0.1',
f' </Proxy>',
f'</VirtualHost>',
])
conf.extend(self._get_proxy_conf())
conf.extend([
f'</VirtualHost>',
f'',
])
fd.write("\n".join(conf))
with open(os.path.join(self._conf_dir, 'mime.types'), 'w') as fd:
fd.write("\n".join([
@ -316,13 +327,31 @@ class Httpd:
''
]))
def _get_proxy_conf(self):
if self._proxy_auth:
return [
f' <Proxy "*">',
f' AuthType Basic',
f' AuthName "Restricted Proxy"',
f' AuthBasicProvider file',
f' AuthUserFile "{self._passwords}"',
f' Require user proxy',
f' </Proxy>',
]
else:
return [
f' <Proxy "*">',
f' Require ip 127.0.0.1',
f' </Proxy>',
]
def _get_log_level(self):
#if self.env.verbose > 3:
# return 'trace2'
#if self.env.verbose > 2:
# return 'trace1'
#if self.env.verbose > 1:
# return 'debug'
if self.env.verbose > 3:
return 'trace2'
if self.env.verbose > 2:
return 'trace1'
if self.env.verbose > 1:
return 'debug'
return 'info'
def _curltest_conf(self) -> List[str]:

View File

@ -41,10 +41,12 @@ log = logging.getLogger(__name__)
class Nghttpx:
def __init__(self, env: Env):
def __init__(self, env: Env, port: int, name: str):
self.env = env
self._name = name
self._port = port
self._cmd = env.nghttpx
self._run_dir = os.path.join(env.gen_dir, 'nghttpx')
self._run_dir = os.path.join(env.gen_dir, name)
self._pid_file = os.path.join(self._run_dir, 'nghttpx.pid')
self._conf_file = os.path.join(self._run_dir, 'nghttpx.conf')
self._error_log = os.path.join(self._run_dir, 'nghttpx.log')
@ -76,27 +78,7 @@ class Nghttpx:
return True
def start(self, wait_live=True):
self._mkpath(self._tmp_dir)
if self._process:
self.stop()
args = [
self._cmd,
f'--frontend=*,{self.env.h3_port};quic',
f'--backend=127.0.0.1,{self.env.https_port};{self.env.domain1};sni={self.env.domain1};proto=h2;tls',
f'--backend=127.0.0.1,{self.env.http_port}',
f'--log-level=INFO',
f'--pid-file={self._pid_file}',
f'--errorlog-file={self._error_log}',
f'--conf={self._conf_file}',
f'--cacert={self.env.ca.cert_file}',
self.env.get_credentials(self.env.domain1).pkey_file,
self.env.get_credentials(self.env.domain1).cert_file,
]
ngerr = open(self._stderr, 'a')
self._process = subprocess.Popen(args=args, stderr=ngerr)
if self._process.returncode is not None:
return False
return not wait_live or self.wait_live(timeout=timedelta(seconds=5))
pass
def stop_if_running(self):
if self.is_running():
@ -146,7 +128,7 @@ class Nghttpx:
curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
try_until = datetime.now() + timeout
while datetime.now() < try_until:
check_url = f'https://{self.env.domain1}:{self.env.h3_port}/'
check_url = f'https://{self.env.domain1}:{self._port}/'
r = curl.http_get(url=check_url, extra_args=['--http3-only'])
if r.exit_code != 0:
return True
@ -159,7 +141,7 @@ class Nghttpx:
curl = CurlClient(env=self.env, run_dir=self._tmp_dir)
try_until = datetime.now() + timeout
while datetime.now() < try_until:
check_url = f'https://{self.env.domain1}:{self.env.h3_port}/'
check_url = f'https://{self.env.domain1}:{self._port}/'
r = curl.http_get(url=check_url, extra_args=[
'--http3-only', '--trace', 'curl.trace', '--trace-time'
])
@ -184,3 +166,94 @@ class Nghttpx:
fd.write("\n".join([
'# do we need something here?'
]))
class NghttpxQuic(Nghttpx):
    """nghttpx instance with a QUIC (HTTP/3) frontend, backed by the
    test httpd's https: and http: ports."""

    def __init__(self, env: Env):
        super().__init__(env=env, name='nghttpx-quic', port=env.h3_port)

    def start(self, wait_live=True):
        # ensure the tmp dir exists and no earlier instance is still up
        self._mkpath(self._tmp_dir)
        if self._process:
            self.stop()
        run_args = [
            self._cmd,
            f'--frontend=*,{self.env.h3_port};quic',
            f'--backend=127.0.0.1,{self.env.https_port};{self.env.domain1};sni={self.env.domain1};proto=h2;tls',
            f'--backend=127.0.0.1,{self.env.http_port}',
            f'--log-level=INFO',
            f'--pid-file={self._pid_file}',
            f'--errorlog-file={self._error_log}',
            f'--conf={self._conf_file}',
            f'--cacert={self.env.ca.cert_file}',
            self.env.get_credentials(self.env.domain1).pkey_file,
            self.env.get_credentials(self.env.domain1).cert_file,
            f'--frontend-http3-window-size=1M',
            f'--frontend-http3-max-window-size=10M',
            f'--frontend-http3-connection-window-size=10M',
            f'--frontend-http3-max-connection-window-size=100M',
        ]
        err_fd = open(self._stderr, 'a')
        self._process = subprocess.Popen(args=run_args, stderr=err_fd)
        if self._process.returncode is not None:
            # process ended immediately: start failed
            return False
        return not wait_live or self.wait_live(timeout=timedelta(seconds=5))
class NghttpxFwd(Nghttpx):
    """nghttpx running in HTTP/2 forward-proxy mode, relaying to the
    Apache httpd forward proxy."""

    def __init__(self, env: Env):
        super().__init__(env=env, name='nghttpx-fwd', port=env.h2proxys_port)

    def start(self, wait_live=True):
        # ensure the tmp dir exists and no earlier instance is still up
        self._mkpath(self._tmp_dir)
        if self._process:
            self.stop()
        run_args = [
            self._cmd,
            f'--http2-proxy',
            f'--frontend=*,{self.env.h2proxys_port}',
            f'--backend=127.0.0.1,{self.env.proxy_port}',
            f'--log-level=INFO',
            f'--pid-file={self._pid_file}',
            f'--errorlog-file={self._error_log}',
            f'--conf={self._conf_file}',
            f'--cacert={self.env.ca.cert_file}',
            self.env.get_credentials(self.env.proxy_domain).pkey_file,
            self.env.get_credentials(self.env.proxy_domain).cert_file,
        ]
        err_fd = open(self._stderr, 'a')
        self._process = subprocess.Popen(args=run_args, stderr=err_fd)
        if self._process.returncode is not None:
            # process ended immediately: start failed
            return False
        return not wait_live or self.wait_live(timeout=timedelta(seconds=5))

    def wait_dead(self, timeout: timedelta):
        # poll the proxy port until it stops answering or timeout elapses
        client = CurlClient(env=self.env, run_dir=self._tmp_dir)
        check_url = f'https://{self.env.proxy_domain}:{self.env.h2proxys_port}/'
        deadline = datetime.now() + timeout
        while datetime.now() < deadline:
            r = client.http_get(url=check_url)
            if r.exit_code != 0:
                return True
            log.debug(f'waiting for nghttpx-fwd to stop responding: {r}')
            time.sleep(.1)
        log.debug(f"Server still responding after {timeout}")
        return False

    def wait_live(self, timeout: timedelta):
        # poll the proxy port until it answers or timeout elapses
        client = CurlClient(env=self.env, run_dir=self._tmp_dir)
        check_url = f'https://{self.env.proxy_domain}:{self.env.h2proxys_port}/'
        deadline = datetime.now() + timeout
        while datetime.now() < deadline:
            r = client.http_get(url=check_url, extra_args=[
                '--trace', 'curl.trace', '--trace-time'
            ])
            if r.exit_code == 0:
                return True
            log.debug(f'waiting for nghttpx-fwd to become responsive: {r}')
            time.sleep(.1)
        log.error(f"Server still not responding after {timeout}")
        return False