transfer: readwrite improvements

- changed header/chunk/handler->readwrite prototypes to accept `buf`,
  `blen` and a `pconsumed` pointer. They now get the buffer to work on
  and report back how many bytes they consumed
- eliminated `k->str` in SingleRequest
- improved excess data handling to properly calculate with any body data
  left in the headerb buffer
- eliminated `k->badheader` enum to only be a bool

Closes #12283
This commit is contained in:
Stefan Eissing 2023-11-06 17:06:06 +01:00 committed by Daniel Stenberg
parent d9e7643ced
commit 1cd2f0072f
No known key found for this signature in database
GPG Key ID: 5CC908FDB71E12C2
8 changed files with 269 additions and 246 deletions

View File

@ -386,12 +386,12 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
return CURLE_OK;
while(ts->keepon) {
ssize_t gotbytes;
ssize_t nread;
char byte;
/* Read one byte at a time to avoid a race condition. Wait at most one
second before looping to ensure continuous pgrsUpdates. */
result = Curl_read(data, tunnelsocket, &byte, 1, &gotbytes);
result = Curl_read(data, tunnelsocket, &byte, 1, &nread);
if(result == CURLE_AGAIN)
/* socket buffer drained, return */
return CURLE_OK;
@ -404,7 +404,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
break;
}
if(gotbytes <= 0) {
if(nread <= 0) {
if(data->set.proxyauth && data->state.authproxy.avail &&
data->state.aptr.proxyuserpwd) {
/* proxy auth was requested and there was proxy auth available,
@ -437,11 +437,11 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
properly to know when the end of the body is reached */
CHUNKcode r;
CURLcode extra;
ssize_t tookcareof = 0;
size_t consumed = 0;
/* now parse the chunked piece of data so that we can
properly tell when the stream ends */
r = Curl_httpchunk_read(data, &byte, 1, &tookcareof, &extra);
r = Curl_httpchunk_read(data, &byte, 1, &consumed, &extra);
if(r == CHUNKE_STOP) {
/* we're done reading chunks! */
infof(data, "chunk reading DONE");
@ -499,6 +499,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
else if(ts->chunked_encoding) {
CHUNKcode r;
CURLcode extra;
size_t consumed = 0;
infof(data, "Ignore chunked response-body");
@ -513,8 +514,7 @@ static CURLcode recv_CONNECT_resp(struct Curl_cfilter *cf,
/* now parse the chunked piece of data so that we can properly
tell when the stream ends */
r = Curl_httpchunk_read(data, linep + 1, 1, &gotbytes,
&extra);
r = Curl_httpchunk_read(data, linep + 1, 1, &consumed, &extra);
if(r == CHUNKE_STOP) {
/* we're done reading chunks! */
infof(data, "chunk reading DONE");

View File

@ -3995,36 +3995,32 @@ CURLcode Curl_bump_headersize(struct Curl_easy *data,
*/
CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
struct connectdata *conn,
ssize_t *nread,
const char *buf, size_t blen,
size_t *pconsumed,
bool *stop_reading)
{
CURLcode result;
struct SingleRequest *k = &data->req;
ssize_t onread = *nread;
char *ostr = k->str;
char *headp;
char *str_start;
char *end_ptr;
/* header line within buffer loop */
*stop_reading = FALSE;
*pconsumed = 0;
do {
size_t rest_length;
size_t full_length;
size_t line_length;
int writetype;
/* str_start is start of line within buf */
str_start = k->str;
/* data is in network encoding so use 0x0a instead of '\n' */
end_ptr = memchr(str_start, 0x0a, *nread);
end_ptr = memchr(buf, 0x0a, blen);
if(!end_ptr) {
/* Not a complete header line within buffer, append the data to
the end of the headerbuff. */
result = Curl_dyn_addn(&data->state.headerb, str_start, *nread);
result = Curl_dyn_addn(&data->state.headerb, buf, blen);
if(result)
return result;
*pconsumed += blen;
if(!k->headerline) {
/* check if this looks like a protocol header */
@ -4036,31 +4032,28 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
if(st == STATUS_BAD) {
/* this is not the beginning of a protocol first header line */
k->header = FALSE;
k->badheader = HEADER_ALLBAD;
k->badheader = TRUE;
streamclose(conn, "bad HTTP: No end-of-message indicator");
if(!data->set.http09_allowed) {
failf(data, "Received HTTP/0.9 when not allowed");
return CURLE_UNSUPPORTED_PROTOCOL;
}
break;
goto out;
}
}
*nread = 0;
break; /* read more and try again */
goto out; /* read more and try again */
}
/* decrease the size of the remaining (supposed) header line */
rest_length = (end_ptr - k->str) + 1;
*nread -= (ssize_t)rest_length;
k->str = end_ptr + 1; /* move past new line */
full_length = k->str - str_start;
result = Curl_dyn_addn(&data->state.headerb, str_start, full_length);
line_length = (end_ptr - buf) + 1;
result = Curl_dyn_addn(&data->state.headerb, buf, line_length);
if(result)
return result;
blen -= line_length;
buf += line_length;
*pconsumed += line_length;
/****
* We now have a FULL header line in 'headerb'.
*****/
@ -4078,14 +4071,12 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
return CURLE_UNSUPPORTED_PROTOCOL;
}
k->header = FALSE;
if(*nread)
if(blen)
/* since there's more, this is a partial bad header */
k->badheader = HEADER_PARTHEADER;
k->badheader = TRUE;
else {
/* this was all we read so it's all a bad header */
k->badheader = HEADER_ALLBAD;
*nread = onread;
k->str = ostr;
k->badheader = TRUE;
return CURLE_OK;
}
break;
@ -4139,22 +4130,23 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
/* switch to http2 now. The bytes after response headers
are also processed here, otherwise they are lost. */
result = Curl_http2_upgrade(data, conn, FIRSTSOCKET,
k->str, *nread);
result = Curl_http2_upgrade(data, conn, FIRSTSOCKET, buf, blen);
if(result)
return result;
*nread = 0;
*pconsumed += blen;
blen = 0;
}
#ifdef USE_WEBSOCKETS
else if(k->upgr101 == UPGR101_WS) {
/* verify the response */
result = Curl_ws_accept(data, k->str, *nread);
result = Curl_ws_accept(data, buf, blen);
if(result)
return result;
k->header = FALSE; /* no more header to parse! */
if(data->set.connect_only) {
k->keepon &= ~KEEP_RECV; /* read no more content */
*nread = 0;
*pconsumed += blen;
blen = 0;
}
}
#endif
@ -4393,8 +4385,10 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
k->keepon &= ~KEEP_RECV;
}
Curl_debug(data, CURLINFO_HEADER_IN, str_start, headerlen);
break; /* exit header line loop */
Curl_debug(data, CURLINFO_HEADER_IN,
Curl_dyn_ptr(&data->state.headerb),
Curl_dyn_len(&data->state.headerb));
goto out; /* exit header line loop */
}
/* We continue reading headers, reset the line-based header */
@ -4583,12 +4577,12 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
Curl_dyn_reset(&data->state.headerb);
}
while(*k->str); /* header line within buffer */
while(blen);
/* We might have reached the end of the header part here, but
there might be a non-header part left in the end of the read
buffer. */
out:
return CURLE_OK;
}

View File

@ -227,7 +227,8 @@ CURLcode Curl_http_size(struct Curl_easy *data);
CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
struct connectdata *conn,
ssize_t *nread,
const char *buf, size_t blen,
size_t *pconsumed,
bool *stop_reading);
/**

View File

@ -98,9 +98,9 @@ void Curl_httpchunk_init(struct Curl_easy *data)
* For example, 0x0d and 0x0a are used instead of '\r' and '\n'.
*/
CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
char *datap,
ssize_t datalen,
ssize_t *wrote,
char *buf,
size_t blen,
size_t *pconsumed,
CURLcode *extrap)
{
CURLcode result = CURLE_OK;
@ -108,28 +108,27 @@ CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
struct Curl_chunker *ch = &conn->chunk;
struct SingleRequest *k = &data->req;
size_t piece;
curl_off_t length = (curl_off_t)datalen;
*wrote = 0; /* nothing's written yet */
*pconsumed = 0; /* nothing's consumed yet */
/* the original data is written to the client, but we go on with the
chunk read process, to properly calculate the content length */
if(data->set.http_te_skip && !k->ignorebody) {
result = Curl_client_write(data, CLIENTWRITE_BODY, datap, datalen);
result = Curl_client_write(data, CLIENTWRITE_BODY, buf, blen);
if(result) {
*extrap = result;
return CHUNKE_PASSTHRU_ERROR;
}
}
while(length) {
while(blen) {
switch(ch->state) {
case CHUNK_HEX:
if(ISXDIGIT(*datap)) {
if(ISXDIGIT(*buf)) {
if(ch->hexindex < CHUNK_MAXNUM_LEN) {
ch->hexbuffer[ch->hexindex] = *datap;
datap++;
length--;
ch->hexbuffer[ch->hexindex] = *buf;
buf++;
blen--;
ch->hexindex++;
}
else {
@ -143,7 +142,7 @@ CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
a hexadecimal digit. */
return CHUNKE_ILLEGAL_HEX;
/* length and datap are unmodified */
/* blen and buf are unmodified */
ch->hexbuffer[ch->hexindex] = 0;
if(curlx_strtoofft(ch->hexbuffer, &endptr, 16, &ch->datasize))
@ -154,7 +153,7 @@ CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
case CHUNK_LF:
/* waiting for the LF after a chunk size */
if(*datap == 0x0a) {
if(*buf == 0x0a) {
/* we're now expecting data to come, unless size was zero! */
if(0 == ch->datasize) {
ch->state = CHUNK_TRAILER; /* now check for trailers */
@ -163,19 +162,21 @@ CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
ch->state = CHUNK_DATA;
}
datap++;
length--;
buf++;
blen--;
break;
case CHUNK_DATA:
/* We expect 'datasize' of data. We have 'length' right now, it can be
/* We expect 'datasize' of data. We have 'blen' right now, it can be
more or less than 'datasize'. Get the smallest piece.
*/
piece = curlx_sotouz((ch->datasize >= length)?length:ch->datasize);
piece = blen;
if(ch->datasize < (curl_off_t)blen)
piece = curlx_sotouz(ch->datasize);
/* Write the data portion available */
if(!data->set.http_te_skip && !k->ignorebody) {
result = Curl_client_write(data, CLIENTWRITE_BODY, datap, piece);
result = Curl_client_write(data, CLIENTWRITE_BODY, buf, piece);
if(result) {
*extrap = result;
@ -183,10 +184,10 @@ CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
}
}
*wrote += piece;
*pconsumed += piece;
ch->datasize -= piece; /* decrease amount left to expect */
datap += piece; /* move read pointer forward */
length -= piece; /* decrease space left in this round */
buf += piece; /* move read pointer forward */
blen -= piece; /* decrease space left in this round */
if(0 == ch->datasize)
/* end of data this round, we now expect a trailing CRLF */
@ -194,18 +195,18 @@ CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
break;
case CHUNK_POSTLF:
if(*datap == 0x0a) {
if(*buf == 0x0a) {
/* The last one before we go back to hex state and start all over. */
Curl_httpchunk_init(data); /* sets state back to CHUNK_HEX */
}
else if(*datap != 0x0d)
else if(*buf != 0x0d)
return CHUNKE_BAD_CHUNK;
datap++;
length--;
buf++;
blen--;
break;
case CHUNK_TRAILER:
if((*datap == 0x0d) || (*datap == 0x0a)) {
if((*buf == 0x0d) || (*buf == 0x0a)) {
char *tr = Curl_dyn_ptr(&conn->trailer);
/* this is the end of a trailer, but if the trailer was zero bytes
there was no trailer and we move on */
@ -229,7 +230,7 @@ CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
}
Curl_dyn_reset(&conn->trailer);
ch->state = CHUNK_TRAILER_CR;
if(*datap == 0x0a)
if(*buf == 0x0a)
/* already on the LF */
break;
}
@ -240,19 +241,19 @@ CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
}
}
else {
result = Curl_dyn_addn(&conn->trailer, datap, 1);
result = Curl_dyn_addn(&conn->trailer, buf, 1);
if(result)
return CHUNKE_OUT_OF_MEMORY;
}
datap++;
length--;
buf++;
blen--;
break;
case CHUNK_TRAILER_CR:
if(*datap == 0x0a) {
if(*buf == 0x0a) {
ch->state = CHUNK_TRAILER_POSTCR;
datap++;
length--;
buf++;
blen--;
}
else
return CHUNKE_BAD_CHUNK;
@ -261,27 +262,27 @@ CHUNKcode Curl_httpchunk_read(struct Curl_easy *data,
case CHUNK_TRAILER_POSTCR:
/* We enter this state when a CR should arrive so we expect to
have to first pass a CR before we wait for LF */
if((*datap != 0x0d) && (*datap != 0x0a)) {
if((*buf != 0x0d) && (*buf != 0x0a)) {
/* not a CR then it must be another header in the trailer */
ch->state = CHUNK_TRAILER;
break;
}
if(*datap == 0x0d) {
if(*buf == 0x0d) {
/* skip if CR */
datap++;
length--;
buf++;
blen--;
}
/* now wait for the final LF */
ch->state = CHUNK_STOP;
break;
case CHUNK_STOP:
if(*datap == 0x0a) {
length--;
if(*buf == 0x0a) {
blen--;
/* Record the length of any data left in the end of the buffer
even if there's no more chunks to read */
ch->datasize = curlx_sotouz(length);
ch->datasize = blen;
return CHUNKE_STOP; /* return stop */
}

View File

@ -93,8 +93,8 @@ struct Curl_chunker {
/* The following functions are defined in http_chunks.c */
void Curl_httpchunk_init(struct Curl_easy *data);
CHUNKcode Curl_httpchunk_read(struct Curl_easy *data, char *datap,
ssize_t length, ssize_t *wrote,
CHUNKcode Curl_httpchunk_read(struct Curl_easy *data, char *buf,
size_t blen, size_t *pconsumed,
CURLcode *passthru);
#endif /* HEADER_CURL_HTTP_CHUNKS_H */

View File

@ -59,14 +59,19 @@ static int rtsp_getsock_do(struct Curl_easy *data,
/*
* Parse and write out any available RTP data.
*
* nread: amount of data left after k->str. will be modified if RTP
* data is parsed and k->str is moved up
* readmore: whether or not the RTP parser needs more data right away
* @param data the transfer
* @param conn the connection
* @param buf data read from connection
* @param blen amount of data in buf
* @param pconsumed out, number of bytes in buf consumed
* @param readmore out, TRUE iff complete buf was consumed and more data
* is needed
*/
static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data,
struct connectdata *conn,
ssize_t *nread,
const char *buf,
size_t blen,
size_t *pconsumed,
bool *readmore);
static CURLcode rtsp_setup_connection(struct Curl_easy *data,
@ -754,14 +759,14 @@ out:
static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data,
struct connectdata *conn,
ssize_t *nread,
const char *buf,
size_t blen,
size_t *pconsumed,
bool *readmore)
{
struct rtsp_conn *rtspc = &(conn->proto.rtspc);
CURLcode result = CURLE_OK;
size_t consumed = 0;
char *buf;
size_t blen;
bool in_body;
if(!data->req.header)
@ -770,11 +775,8 @@ static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data,
(data->req.size >= 0) &&
(data->req.bytecount < data->req.size);
DEBUGASSERT(*nread >= 0);
blen = (size_t)(*nread);
buf = data->req.str;
*readmore = FALSE;
*pconsumed = 0;
if(!blen) {
goto out;
}
@ -784,6 +786,7 @@ static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data,
result = rtsp_filter_rtp(data, conn, buf, blen, in_body, &consumed);
if(result)
goto out;
*pconsumed += consumed;
buf += consumed;
blen -= consumed;
}
@ -791,16 +794,16 @@ static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data,
/* we want to parse headers, do so */
if(data->req.header && blen) {
bool stop_reading;
rtspc->in_header = TRUE;
data->req.str = buf;
*nread = blen;
result = Curl_http_readwrite_headers(data, conn, nread, &stop_reading);
result = Curl_http_readwrite_headers(data, conn, buf, blen,
&consumed, &stop_reading);
if(result)
goto out;
DEBUGASSERT(*nread >= 0);
blen = (size_t)(*nread);
buf = data->req.str;
*pconsumed += consumed;
buf += consumed;
blen -= consumed;
if(!data->req.header)
rtspc->in_header = FALSE;
@ -813,13 +816,10 @@ static CURLcode rtsp_rtp_readwrite(struct Curl_easy *data,
result = rtsp_filter_rtp(data, conn, buf, blen, in_body, &consumed);
if(result)
goto out;
buf += consumed;
blen -= consumed;
*pconsumed += consumed;
}
}
data->req.str = buf;
*nread = blen;
if(rtspc->state != RTP_PARSE_SKIP)
*readmore = TRUE;

View File

@ -413,6 +413,26 @@ bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc)
return TRUE;
}
static size_t get_max_body_write_len(struct Curl_easy *data)
{
if(data->req.maxdownload != -1) {
/* How much more are we allowed to write? */
curl_off_t remain_diff;
remain_diff = data->req.maxdownload - data->req.bytecount;
if(remain_diff < 0) {
/* already written too much! */
return 0;
}
else if(remain_diff > SSIZE_T_MAX) {
return SIZE_T_MAX;
}
else {
return (size_t)remain_diff;
}
}
return SIZE_T_MAX;
}
/*
* Go ahead and do a read if we have a readable socket or if
* the stream was rewound (in which case we have data in a
@ -428,16 +448,15 @@ static CURLcode readwrite_data(struct Curl_easy *data,
bool *comeback)
{
CURLcode result = CURLE_OK;
ssize_t nread; /* number of bytes read */
ssize_t n_to_write;
bool readmore = FALSE; /* used by RTP to signal for more data */
char *buf, *excess_data;
size_t blen, hd_data_len, excess_len;
size_t consumed;
int maxloops = 100;
curl_off_t max_recv = data->set.max_recv_speed?
data->set.max_recv_speed : CURL_OFF_T_MAX;
char *buf = data->state.buffer;
bool data_eof_handled = FALSE;
DEBUGASSERT(buf);
DEBUGASSERT(data->state.buffer);
*done = FALSE;
*comeback = FALSE;
@ -445,9 +464,7 @@ static CURLcode readwrite_data(struct Curl_easy *data,
read or we get a CURLE_AGAIN */
do {
bool is_empty_data = FALSE;
size_t excess = 0; /* excess bytes read */
size_t buffersize = data->set.buffer_size;
size_t bytestoread = buffersize;
size_t bytestoread = data->set.buffer_size;
/* For HTTP/2 and HTTP/3, read data without caring about the content
length. This is safe because body in HTTP/2 is always segmented
thanks to its framing layer. Meanwhile, we have to call Curl_read
@ -456,31 +473,40 @@ static CURLcode readwrite_data(struct Curl_easy *data,
bool is_http3 = Curl_conn_is_http3(data, conn, FIRSTSOCKET);
data_eof_handled = is_http3 || Curl_conn_is_http2(data, conn, FIRSTSOCKET);
if(!data_eof_handled && k->size != -1 && !k->header) {
/* make sure we don't read too much */
/* Each loop iteration starts with a fresh buffer and handles
* all data read into it. */
buf = data->state.buffer;
blen = 0;
excess_data = NULL;
excess_len = 0;
/* If we are reading BODY data and the connection does NOT handle EOF
* and we know the size of the BODY data, limit the read amount */
if(!k->header && !data_eof_handled && k->size != -1) {
curl_off_t totalleft = k->size - k->bytecount;
if(totalleft < (curl_off_t)bytestoread)
if(totalleft <= 0)
bytestoread = 0;
else if(totalleft < (curl_off_t)bytestoread)
bytestoread = (size_t)totalleft;
}
if(bytestoread) {
/* receive data from the network! */
ssize_t nread; /* number of bytes read */
result = Curl_read(data, conn->sockfd, buf, bytestoread, &nread);
/* read would've blocked */
if(CURLE_AGAIN == result) {
result = CURLE_OK;
break; /* get out of loop */
}
if(result>0)
else if(result)
goto out;
DEBUGASSERT(nread >= 0);
blen = (size_t)nread;
}
else {
/* read nothing but since we wanted nothing we consider this an OK
situation to proceed from */
DEBUGF(infof(data, "readwrite_data: we're done"));
nread = 0;
}
if(!k->bytecount) {
@ -492,12 +518,17 @@ static CURLcode readwrite_data(struct Curl_easy *data,
*didwhat |= KEEP_RECV;
/* indicates data of zero size, i.e. empty file */
is_empty_data = ((nread == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
is_empty_data = ((blen == 0) && (k->bodywrites == 0)) ? TRUE : FALSE;
if(0 < nread || is_empty_data) {
buf[nread] = 0;
if(0 < blen || is_empty_data) {
/* data->state.buffer is allocated 1 byte larger than
* data->set.buffer_size admits. *wink* */
/* TODO: we should really not rely on this being 0-terminated, since
* the actual data read might contain 0s. */
buf[blen] = 0;
}
if(!nread) {
if(!blen) {
/* if we receive 0 or less here, either the data transfer is done or the
server closed the connection and we bail out from this! */
if(data_eof_handled)
@ -509,46 +540,55 @@ static CURLcode readwrite_data(struct Curl_easy *data,
break;
}
/* Default buffer to use when we write the buffer, it may be changed
in the flow below before the actual storing is done. */
k->str = buf;
if(conn->handler->readwrite) {
result = conn->handler->readwrite(data, conn, &nread, &readmore);
bool readmore = FALSE; /* indicates data is incomplete, need more */
consumed = 0;
result = conn->handler->readwrite(data, conn, buf, blen,
&consumed, &readmore);
if(result)
goto out;
if(readmore)
break;
buf += consumed;
blen -= consumed;
}
#ifndef CURL_DISABLE_HTTP
/* Since this is a two-state thing, we check if we are parsing
headers at the moment or not. */
if(k->header) {
/* we are in parse-the-header-mode */
bool stop_reading = FALSE;
result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
consumed = 0;
result = Curl_http_readwrite_headers(data, conn, buf, blen,
&consumed, &stop_reading);
if(result)
goto out;
buf += consumed;
blen -= consumed;
if(conn->handler->readwrite &&
(k->maxdownload <= 0 && nread > 0)) {
result = conn->handler->readwrite(data, conn, &nread, &readmore);
(k->maxdownload <= 0 && blen > 0)) {
bool readmore = FALSE; /* indicates data is incomplete, need more */
consumed = 0;
result = conn->handler->readwrite(data, conn, buf, blen,
&consumed, &readmore);
if(result)
goto out;
if(readmore)
break;
buf += consumed;
blen -= consumed;
}
if(stop_reading) {
/* We've stopped dealing with input, get out of the do-while loop */
if(nread > 0) {
if(blen > 0) {
infof(data,
"Excess found:"
" excess = %zd"
" excess = %zu"
" url = %s (zero-length body)",
nread, data->state.up.path);
blen, data->state.up.path);
}
break;
@ -560,13 +600,13 @@ static CURLcode readwrite_data(struct Curl_easy *data,
/* This is not an 'else if' since it may be a rest from the header
parsing, where the beginning of the buffer is headers and the end
is non-headers. */
if(!k->header && (nread > 0 || is_empty_data)) {
if(!k->header && (blen > 0 || is_empty_data)) {
if(data->req.no_body && nread > 0) {
if(data->req.no_body && blen > 0) {
/* data arrives although we want none, bail out */
streamclose(conn, "ignoring body");
DEBUGF(infof(data, "did not want a BODY, but seeing %zd bytes",
nread));
DEBUGF(infof(data, "did not want a BODY, but seeing %zu bytes",
blen));
*done = TRUE;
result = CURLE_WEIRD_SERVER_REPLY;
goto out;
@ -590,12 +630,13 @@ static CURLcode readwrite_data(struct Curl_easy *data,
/*
* Here comes a chunked transfer flying and we need to decode this
* properly. While the name says read, this function both reads
* and writes away the data. The returned 'nread' holds the number
* of actual data it wrote to the client.
* and writes away the data.
*/
CURLcode extra;
CHUNKcode res =
Curl_httpchunk_read(data, k->str, nread, &nread, &extra);
CHUNKcode res;
consumed = 0;
res = Curl_httpchunk_read(data, buf, blen, &consumed, &extra);
if(CHUNKE_OK < res) {
if(CHUNKE_PASSTHRU_ERROR == res) {
@ -607,9 +648,14 @@ static CURLcode readwrite_data(struct Curl_easy *data,
result = CURLE_RECV_ERROR;
goto out;
}
if(CHUNKE_STOP == res) {
buf += consumed;
blen -= consumed;
if(CHUNKE_STOP == res) {
/* we're done reading chunks! */
k->keepon &= ~KEEP_RECV; /* read no more */
/* chunks read successfully, download is complete */
k->download_done = TRUE;
/* N number of bytes at the end of the str buffer that weren't
written to the client. */
@ -623,43 +669,59 @@ static CURLcode readwrite_data(struct Curl_easy *data,
}
#endif /* CURL_DISABLE_HTTP */
/* Account for body content stored in the header buffer */
n_to_write = nread;
if((k->badheader == HEADER_PARTHEADER) && !k->ignorebody) {
n_to_write += Curl_dyn_len(&data->state.headerb);
}
/* If we know how much to download, have we reached the last bytes? */
if(-1 != k->maxdownload) {
size_t max_write_len = get_max_body_write_len(data);
if((-1 != k->maxdownload) &&
(k->bytecount + n_to_write >= k->maxdownload)) {
/* Account for body content still in the header buffer */
hd_data_len = k->badheader? Curl_dyn_len(&data->state.headerb) : 0;
if(blen + hd_data_len >= max_write_len) {
/* We have all download bytes, but do we have too many? */
excess_len = (blen + hd_data_len) - max_write_len;
if(excess_len > 0 && !k->ignorebody) {
infof(data,
"Excess found in a read:"
" excess = %zu"
", size = %" CURL_FORMAT_CURL_OFF_T
", maxdownload = %" CURL_FORMAT_CURL_OFF_T
", bytecount = %" CURL_FORMAT_CURL_OFF_T,
excess_len, k->size, k->maxdownload, k->bytecount);
connclose(conn, "excess found in a read");
}
excess = (size_t)(k->bytecount + n_to_write - k->maxdownload);
if(excess > 0 && !k->ignorebody) {
infof(data,
"Excess found in a read:"
" excess = %zu"
", size = %" CURL_FORMAT_CURL_OFF_T
", maxdownload = %" CURL_FORMAT_CURL_OFF_T
", bytecount = %" CURL_FORMAT_CURL_OFF_T,
excess, k->size, k->maxdownload, k->bytecount);
connclose(conn, "excess found in a read");
}
if(!excess_len) {
/* no excess bytes, perfect! */
excess_data = NULL;
}
else if(hd_data_len >= excess_len) {
/* uh oh, the body data already in the header buffer alone
* exceeds the limit; the whole `buf` is excess data */
excess_len = blen;
excess_data = buf;
blen = 0;
}
else {
/* `buf` bytes exceed, shorten and set `excess_data` */
excess_len -= hd_data_len;
DEBUGASSERT(blen >= excess_len);
blen -= excess_len;
excess_data = buf + blen;
}
nread = (ssize_t) (k->maxdownload - k->bytecount);
if(nread < 0) /* this should be unusual */
nread = 0;
/* HTTP/3 over QUIC should keep reading until QUIC connection
is closed. In contrast to HTTP/2 which can stop reading
from TCP connection, HTTP/3 over QUIC needs ACK from server
to ensure stream closure. It should keep reading. */
if(!is_http3) {
k->keepon &= ~KEEP_RECV; /* we're done reading */
/* HTTP/3 over QUIC should keep reading until QUIC connection
is closed. In contrast to HTTP/2 which can stop reading
from TCP connection, HTTP/3 over QUIC needs ACK from server
to ensure stream closure. It should keep reading. */
if(!is_http3) {
k->keepon &= ~KEEP_RECV; /* we're done reading */
}
k->download_done = TRUE;
}
}
max_recv -= nread;
max_recv -= blen;
if(!k->chunk && (nread || k->badheader || is_empty_data)) {
if(!k->chunk && (blen || k->badheader || is_empty_data)) {
/* If this is chunky transfer, it was already written */
if(k->badheader) {
@ -668,37 +730,27 @@ static CURLcode readwrite_data(struct Curl_easy *data,
size_t headlen = Curl_dyn_len(&data->state.headerb);
/* Don't let excess data pollute body writes */
if(k->maxdownload == -1 || (curl_off_t)headlen <= k->maxdownload)
result = Curl_client_write(data, CLIENTWRITE_BODY,
Curl_dyn_ptr(&data->state.headerb),
headlen);
else
result = Curl_client_write(data, CLIENTWRITE_BODY,
Curl_dyn_ptr(&data->state.headerb),
(size_t)k->maxdownload);
if(k->maxdownload != -1 && (curl_off_t)headlen > k->maxdownload)
headlen = (size_t)k->maxdownload;
result = Curl_client_write(data, CLIENTWRITE_BODY,
Curl_dyn_ptr(&data->state.headerb),
headlen);
if(result)
goto out;
}
if(k->badheader < HEADER_ALLBAD) {
/* This switch handles various content encodings. If there's an
error here, be sure to check over the almost identical code
in http_chunks.c.
Make sure that ALL_CONTENT_ENCODINGS contains all the
encodings handled here. */
if(nread) {
if(blen) {
#ifndef CURL_DISABLE_POP3
if(conn->handler->protocol & PROTO_FAMILY_POP3) {
result = k->ignorebody? CURLE_OK :
Curl_pop3_write(data, k->str, nread);
}
else
#endif /* CURL_DISABLE_POP3 */
result = Curl_client_write(data, CLIENTWRITE_BODY, k->str,
nread);
if(conn->handler->protocol & PROTO_FAMILY_POP3) {
result = k->ignorebody? CURLE_OK :
Curl_pop3_write(data, buf, blen);
}
else
#endif /* CURL_DISABLE_POP3 */
result = Curl_client_write(data, CLIENTWRITE_BODY, buf, blen);
}
k->badheader = HEADER_NORMAL; /* taken care of now */
k->badheader = FALSE; /* taken care of now */
if(result)
goto out;
@ -706,45 +758,25 @@ static CURLcode readwrite_data(struct Curl_easy *data,
} /* if(!header and data to read) */
if(excess > 0 && !k->ignorebody) {
if(conn->handler->readwrite) {
/* Give protocol handler a chance to do something with it */
k->str += nread;
if(&k->str[excess] > &buf[data->set.buffer_size]) {
/* the excess amount was too excessive(!), make sure
it doesn't read out of buffer */
excess = &buf[data->set.buffer_size] - k->str;
}
nread = (ssize_t)excess;
result = conn->handler->readwrite(data, conn, &nread, &readmore);
if(result)
goto out;
if(conn->handler->readwrite && excess_data) {
bool readmore = FALSE; /* indicates data is incomplete, need more */
if(readmore) {
DEBUGASSERT(nread == 0);
k->keepon |= KEEP_RECV; /* we're not done reading */
}
else if(nread == 0)
break;
/* protocol handler did not consume all excess data */
excess = nread;
}
if(excess) {
infof(data,
"Excess found in a read:"
" excess = %zu"
", size = %" CURL_FORMAT_CURL_OFF_T
", maxdownload = %" CURL_FORMAT_CURL_OFF_T
", bytecount = %" CURL_FORMAT_CURL_OFF_T,
excess, k->size, k->maxdownload, k->bytecount);
connclose(conn, "excess found in a read");
}
consumed = 0;
result = conn->handler->readwrite(data, conn, excess_data, excess_len,
&consumed, &readmore);
if(result)
goto out;
if(readmore)
k->keepon |= KEEP_RECV; /* we're not done reading */
break;
}
if(is_empty_data) {
/* if we received nothing, the server closed the connection and we
are done */
k->keepon &= ~KEEP_RECV;
k->download_done = TRUE;
}
if((k->keepon & KEEP_RECV_PAUSE) || !(k->keepon & KEEP_RECV)) {

View File

@ -672,16 +672,8 @@ struct SingleRequest {
counter to make only a 100 reply (without
a following second response code) result
in a CURLE_GOT_NOTHING error code */
enum {
HEADER_NORMAL, /* no bad header at all */
HEADER_PARTHEADER, /* part of the chunk is a bad header, the rest
is normal data */
HEADER_ALLBAD /* all was believed to be header */
} badheader; /* the header was deemed bad and will be
written as body */
int headerline; /* counts header lines to better track the
first one */
char *str; /* within buf */
curl_off_t offset; /* possible resume offset read from the
Content-Range: header */
int httpcode; /* error code from the 'HTTP/1.? XXX' or
@ -740,7 +732,9 @@ struct SingleRequest {
#endif
unsigned char writer_stack_depth; /* Unencoding stack depth. */
BIT(header); /* incoming data has HTTP header */
BIT(badheader); /* header parsing found sth not a header */
BIT(content_range); /* set TRUE if Content-Range: was found */
BIT(download_done); /* set to TRUE when download is complete */
BIT(upload_done); /* set to TRUE when doing chunked transfer-encoding
upload and we're uploading the last chunk */
BIT(ignorebody); /* we read a response-body but we ignore it! */
@ -824,7 +818,8 @@ struct Curl_handler {
/* If used, this function gets called from transfer.c:readwrite_data() to
allow the protocol to do extra reads/writes */
CURLcode (*readwrite)(struct Curl_easy *data, struct connectdata *conn,
ssize_t *nread, bool *readmore);
const char *buf, size_t blen,
size_t *pconsumed, bool *readmore);
/* This function can perform various checks on the connection. See
CONNCHECK_* for more information about the checks that can be performed,