http: fix sending of large requests

- refs #11342 where errors with git https interactions
  were observed
- the problem was caused by a first send larger than 64KB,
  which resulted in later retries of only 64KB
- limit sending of the first block to 64KB
- adjust h2/h3 filters to cope with parsing the HTTP/1.1
  formatted request in chunks

- introducing Curl_nwrite() as a companion to Curl_write()
  for the many cases where the sockindex is already known
  (see the call-site sketch below)
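
  A minimal before/after sketch of a typical call site (modeled on the
  gopher.c hunk in this commit; sel, k and amount are the local buffer,
  length and result variables there, and Curl_conn_get_socket() stands
  in for however a given call site looked up the socket before):

    /* before: the caller first resolves the socket from its index */
    curl_socket_t sockfd = Curl_conn_get_socket(data, FIRSTSOCKET);
    result = Curl_write(data, sockfd, sel, k, &amount);

    /* after: pass the socket index directly; if the send would block,
       Curl_nwrite() returns CURLE_OK and sets amount to 0 */
    result = Curl_nwrite(data, FIRSTSOCKET, sel, k, &amount);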

Fixes #11342 (again)
Closes #11803
Stefan Eissing 2023-09-05 13:44:13 +02:00 committed by Daniel Stenberg
parent a8a82140aa
commit 2485547da0
13 changed files with 162 additions and 92 deletions

View File

@ -185,7 +185,7 @@ static CURLcode gopher_do(struct Curl_easy *data, bool *done)
if(strlen(sel) < 1)
break;
result = Curl_write(data, sockfd, sel, k, &amount);
result = Curl_nwrite(data, FIRSTSOCKET, sel, k, &amount);
if(!result) { /* Which may not have written it all! */
result = Curl_client_write(data, CLIENTWRITE_HEADER, sel, amount);
if(result)
@ -227,7 +227,7 @@ static CURLcode gopher_do(struct Curl_easy *data, bool *done)
free(sel_org);
if(!result)
result = Curl_write(data, sockfd, "\r\n", 2, &amount);
result = Curl_nwrite(data, FIRSTSOCKET, "\r\n", 2, &amount);
if(result) {
failf(data, "Failed sending Gopher request");
return result;

View File

@ -1281,7 +1281,7 @@ CURLcode Curl_buffer_send(struct dynbuf *in,
curl_off_t *bytes_written,
/* how much of the buffer contains body data */
curl_off_t included_body_bytes,
int socketindex)
int sockindex)
{
ssize_t amount;
CURLcode result;
@ -1289,12 +1289,9 @@ CURLcode Curl_buffer_send(struct dynbuf *in,
size_t size;
struct connectdata *conn = data->conn;
size_t sendsize;
curl_socket_t sockfd;
size_t headersize;
DEBUGASSERT(socketindex <= SECONDARYSOCKET);
sockfd = Curl_conn_get_socket(data, socketindex);
DEBUGASSERT(sockindex <= SECONDARYSOCKET && sockindex >= 0);
/* The looping below is required since we use non-blocking sockets, but due
to the circumstances we will just loop and try again and again etc */
@ -1376,9 +1373,25 @@ CURLcode Curl_buffer_send(struct dynbuf *in,
else
sendsize = size;
}
/* We currently cannot send more than this for http here:
* - if sending blocks, it returns 0 as amount
* - we then whisk aside the `in` into the `http` struct
* and install our own `data->state.fread_func` that
* on subsequent calls reads `in` empty.
* - when the whisked away `in` is empty, the `fread_func`
* is restored to its original state.
* The problem is that `fread_func` can only return
* `upload_buffer_size` lengths. If the send we do here
* is larger and blocks, we do re-sending with smaller
* amounts of data and connection filters do not like
* that.
*/
if(http && (sendsize > (size_t)data->set.upload_buffer_size))
sendsize = (size_t)data->set.upload_buffer_size;
}
result = Curl_write(data, sockfd, ptr, sendsize, &amount);
result = Curl_nwrite(data, sockindex, ptr, sendsize, &amount);
if(!result) {
/*

View File

@ -41,6 +41,7 @@
#include "urlapi-int.h"
#include "cfilters.h"
#include "connect.h"
#include "rand.h"
#include "strtoofft.h"
#include "strdup.h"
#include "transfer.h"
@ -175,6 +176,7 @@ struct stream_ctx {
int32_t id; /* HTTP/2 protocol identifier for stream */
struct bufq recvbuf; /* response buffer */
struct bufq sendbuf; /* request buffer */
struct h1_req_parser h1; /* parsing the request */
struct dynhds resp_trailers; /* response trailer fields */
size_t resp_hds_len; /* amount of response header bytes in recvbuf */
size_t upload_blocked_len;
@ -253,6 +255,7 @@ static CURLcode http2_data_setup(struct Curl_cfilter *cf,
H2_STREAM_SEND_CHUNKS, BUFQ_OPT_NONE);
Curl_bufq_initp(&stream->recvbuf, &ctx->stream_bufcp,
H2_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT);
Curl_h1_req_parse_init(&stream->h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
Curl_dynhds_init(&stream->resp_trailers, 0, DYN_HTTP_REQUEST);
stream->resp_hds_len = 0;
stream->bodystarted = FALSE;
@ -313,6 +316,7 @@ static void http2_data_done(struct Curl_cfilter *cf,
Curl_bufq_free(&stream->sendbuf);
Curl_bufq_free(&stream->recvbuf);
Curl_h1_req_parse_free(&stream->h1);
Curl_dynhds_free(&stream->resp_trailers);
if(stream->push_headers) {
/* if they weren't used and then freed before */
@ -2014,7 +2018,6 @@ static ssize_t h2_submit(struct stream_ctx **pstream,
{
struct cf_h2_ctx *ctx = cf->ctx;
struct stream_ctx *stream = NULL;
struct h1_req_parser h1;
struct dynhds h2_headers;
nghttp2_nv *nva = NULL;
const void *body = NULL;
@ -2024,7 +2027,6 @@ static ssize_t h2_submit(struct stream_ctx **pstream,
nghttp2_priority_spec pri_spec;
ssize_t nwritten;
Curl_h1_req_parse_init(&h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
*err = http2_data_setup(cf, data, &stream);
@ -2033,17 +2035,22 @@ static ssize_t h2_submit(struct stream_ctx **pstream,
goto out;
}
nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
nwritten = Curl_h1_req_parse_read(&stream->h1, buf, len, NULL, 0, err);
if(nwritten < 0)
goto out;
DEBUGASSERT(h1.done);
DEBUGASSERT(h1.req);
if(!stream->h1.done) {
/* need more data */
goto out;
}
DEBUGASSERT(stream->h1.req);
*err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
*err = Curl_http_req_to_h2(&h2_headers, stream->h1.req, data);
if(*err) {
nwritten = -1;
goto out;
}
/* no longer needed */
Curl_h1_req_parse_free(&stream->h1);
nheader = Curl_dynhds_count(&h2_headers);
nva = malloc(sizeof(nghttp2_nv) * nheader);
@ -2151,7 +2158,6 @@ out:
stream? stream->id : -1, nwritten, *err);
Curl_safefree(nva);
*pstream = stream;
Curl_h1_req_parse_free(&h1);
Curl_dynhds_free(&h2_headers);
return nwritten;
}
@ -2179,7 +2185,8 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
if(len < stream->upload_blocked_len) {
/* Did we get called again with a smaller `len`? This should not
* happen. We are not prepared to handle that. */
failf(data, "HTTP/2 send again with decreased length");
failf(data, "HTTP/2 send again with decreased length (%zd vs %zd)",
len, stream->upload_blocked_len);
*err = CURLE_HTTP2;
nwritten = -1;
goto out;
@ -2211,11 +2218,8 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
* optionally request body, and now are going to send or sending
* more request body in DATA frame */
nwritten = Curl_bufq_write(&stream->sendbuf, buf, len, err);
if(nwritten < 0) {
if(*err != CURLE_AGAIN)
goto out;
nwritten = 0;
}
if(nwritten < 0 && *err != CURLE_AGAIN)
goto out;
}
if(!Curl_bufq_is_empty(&stream->sendbuf)) {
@ -2262,7 +2266,7 @@ static ssize_t cf_h2_send(struct Curl_cfilter *cf, struct Curl_easy *data,
blocked = 1;
}
if(stream && blocked) {
if(stream && blocked && nwritten > 0) {
/* Unable to send all data, due to connection blocked or H2 window
* exhaustion. Data is left in our stream buffer, or nghttp2's internal
* frame buffer or our network out buffer. */

View File

@ -91,7 +91,7 @@ static CURLcode ftpsend(struct Curl_easy *data, struct connectdata *conn,
#ifdef HAVE_GSSAPI
conn->data_prot = PROT_CMD;
#endif
result = Curl_write(data, conn->sock[FIRSTSOCKET], sptr, write_len,
result = Curl_nwrite(data, FIRSTSOCKET, sptr, write_len,
&bytes_written);
#ifdef HAVE_GSSAPI
DEBUGASSERT(data_sec > PROT_NONE && data_sec < PROT_LAST);

View File

@ -117,11 +117,9 @@ static CURLcode mqtt_send(struct Curl_easy *data,
char *buf, size_t len)
{
CURLcode result = CURLE_OK;
struct connectdata *conn = data->conn;
curl_socket_t sockfd = conn->sock[FIRSTSOCKET];
struct MQTT *mq = data->req.p.mqtt;
ssize_t n;
result = Curl_write(data, sockfd, buf, len, &n);
result = Curl_nwrite(data, FIRSTSOCKET, buf, len, &n);
if(result)
return result;
Curl_debug(data, CURLINFO_HEADER_OUT, buf, (size_t)n);

View File

@ -204,8 +204,7 @@ CURLcode Curl_pp_vsendf(struct Curl_easy *data,
#ifdef HAVE_GSSAPI
conn->data_prot = PROT_CMD;
#endif
result = Curl_write(data, conn->sock[FIRSTSOCKET], s, write_len,
&bytes_written);
result = Curl_nwrite(data, FIRSTSOCKET, s, write_len, &bytes_written);
if(result)
return result;
#ifdef HAVE_GSSAPI
@ -467,11 +466,10 @@ CURLcode Curl_pp_flushsend(struct Curl_easy *data,
struct pingpong *pp)
{
/* we have a piece of a command still left to send */
struct connectdata *conn = data->conn;
ssize_t written;
curl_socket_t sock = conn->sock[FIRSTSOCKET];
CURLcode result = Curl_write(data, sock, pp->sendthis + pp->sendsize -
pp->sendleft, pp->sendleft, &written);
CURLcode result = Curl_nwrite(data, FIRSTSOCKET,
pp->sendthis + pp->sendsize - pp->sendleft,
pp->sendleft, &written);
if(result)
return result;

View File

@ -138,6 +138,56 @@ static size_t convert_lineends(struct Curl_easy *data,
}
#endif /* CURL_DO_LINEEND_CONV && !CURL_DISABLE_FTP */
/*
* Curl_nwrite() is an internal write function that sends data to the
* server. Works with a socket index for the connection.
*
* If the write would block (CURLE_AGAIN), it returns CURLE_OK and
* (*pnwritten == 0). Otherwise, a regular CURLcode value is returned.
*/
CURLcode Curl_nwrite(struct Curl_easy *data,
int sockindex,
const void *buf,
size_t blen,
ssize_t *pnwritten)
{
ssize_t nwritten;
CURLcode result = CURLE_OK;
struct connectdata *conn;
DEBUGASSERT(sockindex >= 0 && sockindex < 2);
DEBUGASSERT(pnwritten);
DEBUGASSERT(data);
DEBUGASSERT(data->conn);
conn = data->conn;
#ifdef CURLDEBUG
{
/* Allow debug builds to override this logic to force short sends
*/
char *p = getenv("CURL_SMALLSENDS");
if(p) {
size_t altsize = (size_t)strtoul(p, NULL, 10);
if(altsize)
blen = CURLMIN(blen, altsize);
}
}
#endif
nwritten = conn->send[sockindex](data, sockindex, buf, blen, &result);
if(result == CURLE_AGAIN) {
nwritten = 0;
result = CURLE_OK;
}
else if(result) {
nwritten = -1; /* make sure */
}
else {
DEBUGASSERT(nwritten >= 0);
}
*pnwritten = nwritten;
return result;
}
/*
* Curl_write() is an internal write function that sends data to the
* server. Works with plain sockets, SCP, SSL or kerberos.
@ -151,48 +201,14 @@ CURLcode Curl_write(struct Curl_easy *data,
size_t len,
ssize_t *written)
{
ssize_t bytes_written;
CURLcode result = CURLE_OK;
struct connectdata *conn;
int num;
DEBUGASSERT(data);
DEBUGASSERT(data->conn);
conn = data->conn;
num = (sockfd != CURL_SOCKET_BAD && sockfd == conn->sock[SECONDARYSOCKET]);
#ifdef CURLDEBUG
{
/* Allow debug builds to override this logic to force short sends
*/
char *p = getenv("CURL_SMALLSENDS");
if(p) {
size_t altsize = (size_t)strtoul(p, NULL, 10);
if(altsize)
len = CURLMIN(len, altsize);
}
}
#endif
bytes_written = conn->send[num](data, num, mem, len, &result);
*written = bytes_written;
if(bytes_written >= 0)
/* we completely ignore the curlcode value when subzero is not returned */
return CURLE_OK;
/* handle CURLE_AGAIN or a send failure */
switch(result) {
case CURLE_AGAIN:
*written = 0;
return CURLE_OK;
case CURLE_OK:
/* general send failure */
return CURLE_SEND_ERROR;
default:
/* we got a specific curlcode, forward it */
return result;
}
return Curl_nwrite(data, num, mem, len, written);
}
static CURLcode pausewrite(struct Curl_easy *data,

View File

@ -51,4 +51,11 @@ CURLcode Curl_write(struct Curl_easy *data,
const void *mem, size_t len,
ssize_t *written);
/* internal write-function, using sockindex for connection destination */
CURLcode Curl_nwrite(struct Curl_easy *data,
int sockindex,
const void *buf,
size_t blen,
ssize_t *pnwritten);
#endif /* HEADER_CURL_SENDF_H */

View File

@ -564,12 +564,11 @@ static CURLcode smb_send(struct Curl_easy *data, ssize_t len,
size_t upload_size)
{
struct connectdata *conn = data->conn;
curl_socket_t sockfd = conn->sock[FIRSTSOCKET];
struct smb_conn *smbc = &conn->proto.smbc;
ssize_t bytes_written;
CURLcode result;
result = Curl_write(data, sockfd, data->state.ulbuf,
result = Curl_nwrite(data, FIRSTSOCKET, data->state.ulbuf,
len, &bytes_written);
if(result)
return result;
@ -587,7 +586,6 @@ static CURLcode smb_send(struct Curl_easy *data, ssize_t len,
static CURLcode smb_flush(struct Curl_easy *data)
{
struct connectdata *conn = data->conn;
curl_socket_t sockfd = conn->sock[FIRSTSOCKET];
struct smb_conn *smbc = &conn->proto.smbc;
ssize_t bytes_written;
ssize_t len = smbc->send_size - smbc->sent;
@ -596,9 +594,9 @@ static CURLcode smb_flush(struct Curl_easy *data)
if(!smbc->send_size)
return CURLE_OK;
result = Curl_write(data, sockfd,
data->state.ulbuf + smbc->sent,
len, &bytes_written);
result = Curl_nwrite(data, FIRSTSOCKET,
data->state.ulbuf + smbc->sent,
len, &bytes_written);
if(result)
return result;

View File

@ -1266,10 +1266,10 @@ static CURLcode send_telnet_data(struct Curl_easy *data,
break;
default: /* write! */
bytes_written = 0;
result = Curl_write(data, conn->sock[FIRSTSOCKET],
outbuf + total_written,
outlen - total_written,
&bytes_written);
result = Curl_nwrite(data, FIRSTSOCKET,
outbuf + total_written,
outlen - total_written,
&bytes_written);
total_written += bytes_written;
break;
}

View File

@ -179,6 +179,7 @@ struct h3_stream_ctx {
int64_t id; /* HTTP/3 protocol identifier */
struct bufq sendbuf; /* h3 request body */
struct bufq recvbuf; /* h3 response body */
struct h1_req_parser h1; /* h1 request parsing */
size_t sendbuf_len_in_flight; /* sendbuf amount "in flight" */
size_t upload_blocked_len; /* the amount written last and EGAINed */
size_t recv_buf_nonflow; /* buffered bytes, not counting for flow control */
@ -226,6 +227,7 @@ static CURLcode h3_data_setup(struct Curl_cfilter *cf,
Curl_bufq_initp(&stream->recvbuf, &ctx->stream_bufcp,
H3_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT);
stream->recv_buf_nonflow = 0;
Curl_h1_req_parse_init(&stream->h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
H3_STREAM_LCTX(data) = stream;
return CURLE_OK;
@ -240,6 +242,7 @@ static void h3_data_done(struct Curl_cfilter *cf, struct Curl_easy *data)
CURL_TRC_CF(data, cf, "[%"PRId64"] easy handle is done", stream->id);
Curl_bufq_free(&stream->sendbuf);
Curl_bufq_free(&stream->recvbuf);
Curl_h1_req_parse_free(&stream->h1);
free(stream);
H3_STREAM_LCTX(data) = NULL;
}
@ -1625,7 +1628,6 @@ static ssize_t h3_stream_open(struct Curl_cfilter *cf,
{
struct cf_ngtcp2_ctx *ctx = cf->ctx;
struct h3_stream_ctx *stream = NULL;
struct h1_req_parser h1;
struct dynhds h2_headers;
size_t nheader;
nghttp3_nv *nva = NULL;
@ -1635,7 +1637,6 @@ static ssize_t h3_stream_open(struct Curl_cfilter *cf,
nghttp3_data_reader reader;
nghttp3_data_reader *preader = NULL;
Curl_h1_req_parse_init(&h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
*err = h3_data_setup(cf, data);
@ -1651,17 +1652,22 @@ static ssize_t h3_stream_open(struct Curl_cfilter *cf,
goto out;
}
nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
nwritten = Curl_h1_req_parse_read(&stream->h1, buf, len, NULL, 0, err);
if(nwritten < 0)
goto out;
DEBUGASSERT(h1.done);
DEBUGASSERT(h1.req);
if(!stream->h1.done) {
/* need more data */
goto out;
}
DEBUGASSERT(stream->h1.req);
*err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
*err = Curl_http_req_to_h2(&h2_headers, stream->h1.req, data);
if(*err) {
nwritten = -1;
goto out;
}
/* no longer needed */
Curl_h1_req_parse_free(&stream->h1);
nheader = Curl_dynhds_count(&h2_headers);
nva = malloc(sizeof(nghttp3_nv) * nheader);
@ -1734,7 +1740,6 @@ static ssize_t h3_stream_open(struct Curl_cfilter *cf,
out:
free(nva);
Curl_h1_req_parse_free(&h1);
Curl_dynhds_free(&h2_headers);
return nwritten;
}

View File

@ -210,6 +210,7 @@ static CURLcode quic_ssl_setup(struct Curl_cfilter *cf, struct Curl_easy *data)
struct stream_ctx {
int64_t id; /* HTTP/3 protocol stream identifier */
struct bufq recvbuf; /* h3 response */
struct h1_req_parser h1; /* h1 request parsing */
uint64_t error3; /* HTTP/3 stream error code */
curl_off_t upload_left; /* number of request bytes left to upload */
bool closed; /* TRUE on stream close */
@ -298,6 +299,7 @@ static CURLcode h3_data_setup(struct Curl_cfilter *cf,
stream->id = -1;
Curl_bufq_initp(&stream->recvbuf, &ctx->stream_bufcp,
H3_STREAM_RECV_CHUNKS, BUFQ_OPT_SOFT_LIMIT);
Curl_h1_req_parse_init(&stream->h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
return CURLE_OK;
}
@ -314,6 +316,7 @@ static void h3_data_done(struct Curl_cfilter *cf, struct Curl_easy *data)
--ctx->sends_on_hold;
}
Curl_bufq_free(&stream->recvbuf);
Curl_h1_req_parse_free(&stream->h1);
free(stream);
H3_STREAM_LCTX(data) = NULL;
}
@ -936,7 +939,6 @@ static ssize_t h3_open_stream(struct Curl_cfilter *cf,
struct stream_ctx *stream = H3_STREAM_CTX(data);
size_t nheader, i;
int64_t stream3_id;
struct h1_req_parser h1;
struct dynhds h2_headers;
quiche_h3_header *nva = NULL;
ssize_t nwritten;
@ -950,21 +952,25 @@ static ssize_t h3_open_stream(struct Curl_cfilter *cf,
DEBUGASSERT(stream);
}
Curl_h1_req_parse_init(&h1, H1_PARSE_DEFAULT_MAX_LINE_LEN);
Curl_dynhds_init(&h2_headers, 0, DYN_HTTP_REQUEST);
DEBUGASSERT(stream);
nwritten = Curl_h1_req_parse_read(&h1, buf, len, NULL, 0, err);
nwritten = Curl_h1_req_parse_read(&stream->h1, buf, len, NULL, 0, err);
if(nwritten < 0)
goto out;
DEBUGASSERT(h1.done);
DEBUGASSERT(h1.req);
if(!stream->h1.done) {
/* need more data */
goto out;
}
DEBUGASSERT(stream->h1.req);
*err = Curl_http_req_to_h2(&h2_headers, h1.req, data);
*err = Curl_http_req_to_h2(&h2_headers, stream->h1.req, data);
if(*err) {
nwritten = -1;
goto out;
}
/* no longer needed */
Curl_h1_req_parse_free(&stream->h1);
nheader = Curl_dynhds_count(&h2_headers);
nva = malloc(sizeof(quiche_h3_header) * nheader);
@ -1041,7 +1047,6 @@ static ssize_t h3_open_stream(struct Curl_cfilter *cf,
out:
free(nva);
Curl_h1_req_parse_free(&h1);
Curl_dynhds_free(&h2_headers);
return nwritten;
}

View File

@ -407,6 +407,32 @@ class TestUpload:
respdata = open(curl.response_file(0)).readlines()
assert respdata == indata
# POST data urlencoded, small enough to be sent together with the
# request headers, but with request headers so large that the first
# send is larger than our default upload buffer length (64KB).
# Unfixed, this will fail most of the time when run with
# CURL_DBG_SOCK_WBLOCK=80
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_07_41_post_urlenc_small(self, env: Env, httpd, nghttpx, repeat, proto):
if proto == 'h3' and not env.have_h3():
pytest.skip("h3 not supported")
if proto == 'h3' and env.curl_uses_lib('msh3'):
pytest.skip("msh3 fails here")
if proto == 'h3' and env.curl_uses_lib('quiche'):
pytest.skip("quiche has CWND issues with large requests")
fdata = os.path.join(env.gen_dir, 'data-63k')
curl = CurlClient(env=env)
extra_args = ['--trace-config', 'http/2,http/3']
# add enough headers so that the first send chunk is > 64KB
for i in range(63):
extra_args.extend(['-H', f'x{i:02d}: {"y"*1019}'])
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/echo?id=[0-0]'
r = curl.http_upload(urls=[url], data=f'@{fdata}', alpn_proto=proto, extra_args=extra_args)
r.check_stats(count=1, http_status=200, exitcode=0)
indata = open(fdata).readlines()
respdata = open(curl.response_file(0)).readlines()
assert respdata == indata
def check_download(self, count, srcfile, curl):
for i in range(count):
dfile = curl.download_file(i)