mirror of https://github.com/curl/curl.git
http: expect 100 rework
Move all handling of HTTP's `Expect: 100-continue` feature into a client reader. Add the sending flag `KEEP_SEND_TIMED` that triggers transfer sending on general events like a timer.

HTTP installs a `CURL_CR_PROTOCOL` reader when announcing `Expect: 100-continue`. That reader works as follows:

- on first invocation, it records the time, starts the `EXPIRE_100_TIMEOUT` timer, disables `KEEP_SEND`, enables `KEEP_SEND_TIMED` and returns 0 bytes with eos=FALSE, like a paused upload.
- on subsequent invocations it checks whether the timer has expired. If so, it enables `KEEP_SEND` and switches to passing reads through to the underlying readers.

Transfer handling's `readwrite()` is invoked when a timer expires (like `EXPIRE_100_TIMEOUT`) or when data from the server arrives. Seeing `KEEP_SEND_TIMED`, it tries to upload more data, which triggers reading from the client readers again. That either leads to a new pause or starts the upload.

Flags and timestamps connected to this have been moved from `SingleRequest` into the reader's context.

Closes #13110
This commit is contained in:
parent 3d0fd382a2
commit 80a3b830cc
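For orientation before the diff: the patch turns expect-100 handling into a small state machine inside a client reader. The following is a minimal, standalone sketch of that logic; the types, the `exp100_read()`/`got_100` names and the millisecond plumbing are made up for illustration, while the transitions mirror `cr_exp100_read()` in the `lib/http.c` hunk below (where the "100 arrived" signal instead comes in via `Curl_http_exp100_got100()`).

/*
 * Standalone model of the expect-100 client-reader state machine added in
 * this commit. Types and names here are simplified stand-ins, not curl's
 * internals; only the state transitions mirror cr_exp100_read() below.
 */
#include <stdbool.h>
#include <stdio.h>

enum exp100_state {
  EXP100_SEND_DATA,         /* normal, just send the body */
  EXP100_SENDING_REQUEST,   /* request headers still going out */
  EXP100_AWAITING_CONTINUE, /* waiting for "100 Continue" or a timeout */
  EXP100_FAILED             /* server rejected the expectation */
};

struct exp100_reader {
  enum exp100_state state;
  long started_ms;          /* when we began waiting */
  long timeout_ms;          /* --expect100-timeout equivalent */
};

/* One read attempt from the transfer loop. Returns the number of body bytes
   the upper layer may send now: 0 means "hold off, wait for timer/server",
   -1 means the upload must be aborted. */
static int exp100_read(struct exp100_reader *r, long now_ms, bool got_100)
{
  switch(r->state) {
  case EXP100_SENDING_REQUEST:
    /* headers are out: start the timer and behave like a paused upload */
    r->state = EXP100_AWAITING_CONTINUE;
    r->started_ms = now_ms;
    return 0;
  case EXP100_AWAITING_CONTINUE:
    if(got_100 || (now_ms - r->started_ms) >= r->timeout_ms) {
      /* 100 received or we waited long enough: continue anyway */
      r->state = EXP100_SEND_DATA;
      break;
    }
    return 0;                /* keep waiting, send nothing */
  case EXP100_FAILED:
    return -1;               /* abort the upload */
  default:
    break;
  }
  /* pass-through: read from the underlying client readers (stubbed as 1) */
  return 1;
}

int main(void)
{
  struct exp100_reader r = { EXP100_SENDING_REQUEST, 0, 1000 };
  printf("t=0    -> %d\n", exp100_read(&r, 0, false));    /* 0: start waiting */
  printf("t=500  -> %d\n", exp100_read(&r, 500, false));  /* 0: still waiting */
  printf("t=1200 -> %d\n", exp100_read(&r, 1200, false)); /* 1: timer expired */
  return 0;
}

In the real patch, returning 0 bytes goes hand in hand with clearing `KEEP_SEND` and setting `KEEP_SEND_TIMED`, so the transfer loop in the `lib/transfer.c` hunk retries the upload on timer or server events instead of polling the socket for writability.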
@@ -219,17 +219,16 @@ static int hyper_body_chunk(void *userdata, const hyper_buf *chunk)
Curl_safefree(data->req.newurl);
}
#endif
if(data->state.expect100header) {
Curl_expire_done(data, EXPIRE_100_TIMEOUT);
if(Curl_http_exp100_is_selected(data)) {
if(data->req.httpcode < 400) {
k->exp100 = EXP100_SEND_DATA;
if(data->hyp.exp100_waker) {
hyper_waker_wake(data->hyp.exp100_waker);
data->hyp.exp100_waker = NULL;
Curl_http_exp100_got100(data);
if(data->hyp.send_body_waker) {
hyper_waker_wake(data->hyp.send_body_waker);
data->hyp.send_body_waker = NULL;
}
}
else { /* >= 4xx */
k->exp100 = EXP100_FAILED;
Curl_req_abort_sending(data);
}
}
if(data->state.hconnect && (data->req.httpcode/100 != 2) &&

@@ -353,20 +352,9 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data,
struct SingleRequest *k = &data->req;
(void)conn;

if(k->exp100 > EXP100_SEND_DATA) {
struct curltime now = Curl_now();
timediff_t ms = Curl_timediff(now, k->start100);
if(ms >= data->set.expect_100_timeout) {
/* we've waited long enough, continue anyway */
k->exp100 = EXP100_SEND_DATA;
k->keepon |= KEEP_SEND;
Curl_expire_done(data, EXPIRE_100_TIMEOUT);
infof(data, "Done waiting for 100-continue after %ldms", (long)ms);
if(data->hyp.exp100_waker) {
hyper_waker_wake(data->hyp.exp100_waker);
data->hyp.exp100_waker = NULL;
}
}
if(data->hyp.send_body_waker) {
hyper_waker_wake(data->hyp.send_body_waker);
data->hyp.send_body_waker = NULL;
}

if(select_res & CURL_CSELECT_IN) {

@@ -461,7 +449,7 @@ CURLcode Curl_hyper_stream(struct Curl_easy *data,
reasonp = hyper_response_reason_phrase(resp);
reason_len = hyper_response_reason_phrase_len(resp);

if(http_status == 417 && data->state.expect100header) {
if(http_status == 417 && Curl_http_exp100_is_selected(data)) {
infof(data, "Got 417 while waiting for a 100");
data->state.disableexpect = TRUE;
data->req.newurl = strdup(data->state.url);

@@ -665,17 +653,6 @@ static int uploadstreamed(void *userdata, hyper_context *ctx,
int rc = HYPER_POLL_ERROR;
(void)ctx;

if(data->req.exp100 > EXP100_SEND_DATA) {
if(data->req.exp100 == EXP100_FAILED)
return HYPER_POLL_ERROR;

/* still waiting confirmation */
if(data->hyp.exp100_waker)
hyper_waker_free(data->hyp.exp100_waker);
data->hyp.exp100_waker = hyper_context_waker(ctx);
return HYPER_POLL_PENDING;
}

result = Curl_multi_xfer_ulbuf_borrow(data, &xfer_ulbuf, &xfer_ulblen);
if(result)
goto out;

@@ -974,11 +951,6 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done)
goto error;
}
}
else {
if(!data->state.disableexpect) {
data->state.expect100header = TRUE;
}
}

if(hyper_request_set_method(req, (uint8_t *)method, strlen(method))) {
failf(data, "error setting method");

@@ -1158,13 +1130,10 @@ CURLcode Curl_http(struct Curl_easy *data, bool *done)
if((httpreq == HTTPREQ_GET) || (httpreq == HTTPREQ_HEAD)) {
/* HTTP GET/HEAD download */
Curl_pgrsSetUploadSize(data, 0); /* nothing */
Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, -1);
}

Curl_xfer_setup(data, FIRSTSOCKET, -1, TRUE, FIRSTSOCKET);
conn->datastream = Curl_hyper_stream;
if(data->state.expect100header)
/* Timeout count starts now since with Hyper we don't know exactly when
the full request has been sent. */
data->req.start100 = Curl_now();

/* clear userpwd and proxyuserpwd to avoid reusing old credentials
* from reused connections */

@@ -1206,10 +1175,6 @@ void Curl_hyper_done(struct Curl_easy *data)
hyper_waker_free(h->write_waker);
h->write_waker = NULL;
}
if(h->exp100_waker) {
hyper_waker_free(h->exp100_waker);
h->exp100_waker = NULL;
}
if(h->send_body_waker) {
hyper_waker_free(h->send_body_waker);
h->send_body_waker = NULL;

@@ -1238,6 +1203,7 @@ static const struct Curl_crtype cr_hyper_protocol = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
cr_hyper_unpause,
Curl_creader_def_done,
sizeof(struct Curl_creader)
};

@@ -39,7 +39,6 @@ struct hyptransfer {
hyper_waker *write_waker;
hyper_waker *read_waker;
const hyper_executor *exec;
hyper_waker *exp100_waker;
hyper_waker *send_body_waker;
struct hyp_io_ctx io_ctx;
};
237 lib/http.c

@@ -102,6 +102,9 @@
*/

static bool http_should_fail(struct Curl_easy *data);
static bool http_exp100_is_waiting(struct Curl_easy *data);
static CURLcode http_exp100_add_reader(struct Curl_easy *data);
static void http_exp100_send_anyway(struct Curl_easy *data);

/*
* HTTP handler interface.

@@ -1275,31 +1278,6 @@ static const char *get_http_string(const struct Curl_easy *data,
}
#endif

/* check and possibly add an Expect: header */
static CURLcode expect100(struct Curl_easy *data, struct dynbuf *req)
{
CURLcode result = CURLE_OK;
if(!data->state.disableexpect &&
Curl_use_http_1_1plus(data, data->conn) &&
(data->conn->httpversion < 20)) {
/* if not doing HTTP 1.0 or version 2, or disabled explicitly, we add an
Expect: 100-continue to the headers which actually speeds up post
operations (as there is one packet coming back from the web server) */
const char *ptr = Curl_checkheaders(data, STRCONST("Expect"));
if(ptr) {
data->state.expect100header =
Curl_compareheader(ptr, STRCONST("Expect:"), STRCONST("100-continue"));
}
else {
result = Curl_dyn_addn(req, STRCONST("Expect: 100-continue\r\n"));
if(!result)
data->state.expect100header = TRUE;
}
}

return result;
}

enum proxy_use {
HEADER_SERVER, /* direct to server */
HEADER_PROXY, /* regular request to proxy */

@@ -2164,26 +2142,39 @@ CURLcode Curl_http_req_set_reader(struct Curl_easy *data,
return result;
}

static CURLcode addexpect(struct Curl_easy *data, struct dynbuf *r)
static CURLcode addexpect(struct Curl_easy *data, struct dynbuf *r,
bool *announced_exp100)
{
data->state.expect100header = FALSE;
CURLcode result;
char *ptr;

*announced_exp100 = FALSE;
/* Avoid Expect: 100-continue if Upgrade: is used */
if(data->req.upgr101 == UPGR101_INIT) {
/* For really small puts we don't use Expect: headers at all, and for
the somewhat bigger ones we allow the app to disable it. Just make
sure that the expect100header is always set to the preferred value
here. */
char *ptr = Curl_checkheaders(data, STRCONST("Expect"));
if(ptr) {
data->state.expect100header =
Curl_compareheader(ptr, STRCONST("Expect:"),
STRCONST("100-continue"));
if(data->req.upgr101 != UPGR101_INIT)
return CURLE_OK;

/* For really small puts we don't use Expect: headers at all, and for
the somewhat bigger ones we allow the app to disable it. Just make
sure that the expect100header is always set to the preferred value
here. */
ptr = Curl_checkheaders(data, STRCONST("Expect"));
if(ptr) {
*announced_exp100 =
Curl_compareheader(ptr, STRCONST("Expect:"), STRCONST("100-continue"));
}
else if(!data->state.disableexpect &&
Curl_use_http_1_1plus(data, data->conn) &&
(data->conn->httpversion < 20)) {
/* if not doing HTTP 1.0 or version 2, or disabled explicitly, we add an
Expect: 100-continue to the headers which actually speeds up post
operations (as there is one packet coming back from the web server) */
curl_off_t client_len = Curl_creader_client_length(data);
if(client_len > EXPECT_100_THRESHOLD || client_len < 0) {
result = Curl_dyn_addn(r, STRCONST("Expect: 100-continue\r\n"));
if(result)
return result;
*announced_exp100 = TRUE;
}
else {
curl_off_t client_len = Curl_creader_client_length(data);
if(client_len > EXPECT_100_THRESHOLD || client_len < 0)
return expect100(data, r);
}
}
return CURLE_OK;
}

@@ -2193,6 +2184,7 @@ CURLcode Curl_http_req_complete(struct Curl_easy *data,
{
CURLcode result = CURLE_OK;
curl_off_t req_clen;
bool announced_exp100 = FALSE;

DEBUGASSERT(data->conn);
#ifndef USE_HYPER

@@ -2251,7 +2243,7 @@ CURLcode Curl_http_req_complete(struct Curl_easy *data,
goto out;
}
}
result = addexpect(data, r);
result = addexpect(data, r, &announced_exp100);
if(result)
goto out;
break;

@@ -2262,6 +2254,8 @@ CURLcode Curl_http_req_complete(struct Curl_easy *data,
/* end of headers */
result = Curl_dyn_addn(r, STRCONST("\r\n"));
Curl_pgrsSetUploadSize(data, req_clen);
if(announced_exp100)
result = http_exp100_add_reader(data);

out:
if(!result) {

@@ -3434,11 +3428,7 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
k->headerline = 0; /* restart the header line counter */

/* if we did wait for this do enable write now! */
if(k->exp100 > EXP100_SEND_DATA) {
k->exp100 = EXP100_SEND_DATA;
k->keepon |= KEEP_SEND;
Curl_expire_done(data, EXPIRE_100_TIMEOUT);
}
Curl_http_exp100_got100(data);
break;
case 101:
if(conn->httpversion == 11) {

@@ -3617,13 +3607,11 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
* request body has been sent we stop sending and mark the
* connection for closure after we've read the entire response.
*/
Curl_expire_done(data, EXPIRE_100_TIMEOUT);
if(!Curl_req_done_sending(data)) {
if((k->httpcode == 417) && data->state.expect100header) {
if((k->httpcode == 417) && Curl_http_exp100_is_selected(data)) {
/* 417 Expectation Failed - try again without the Expect
header */
if(!k->writebytecount &&
k->exp100 == EXP100_AWAITING_CONTINUE) {
if(!k->writebytecount && http_exp100_is_waiting(data)) {
infof(data, "Got HTTP failure 417 while waiting for a 100");
}
else {

@@ -3641,10 +3629,7 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
}
else if(data->set.http_keep_sending_on_error) {
infof(data, "HTTP error before end of send, keep sending");
if(k->exp100 > EXP100_SEND_DATA) {
k->exp100 = EXP100_SEND_DATA;
k->keepon |= KEEP_SEND;
}
http_exp100_send_anyway(data);
}
else {
infof(data, "HTTP error before end of send, stop sending");

@@ -3652,8 +3637,6 @@ static CURLcode http_rw_headers(struct Curl_easy *data,
result = Curl_req_abort_sending(data);
if(result)
return result;
if(data->state.expect100header)
k->exp100 = EXP100_FAILED;
}
}
break;

@@ -4337,4 +4320,142 @@ void Curl_http_resp_free(struct http_resp *resp)
}
}

struct cr_exp100_ctx {
struct Curl_creader super;
struct curltime start; /* time started waiting */
enum expect100 state;
};

/* Expect: 100-continue client reader, blocking uploads */

static void http_exp100_continue(struct Curl_easy *data,
struct Curl_creader *reader)
{
struct cr_exp100_ctx *ctx = reader->ctx;
if(ctx->state > EXP100_SEND_DATA) {
ctx->state = EXP100_SEND_DATA;
data->req.keepon |= KEEP_SEND;
data->req.keepon &= ~KEEP_SEND_TIMED;
Curl_expire_done(data, EXPIRE_100_TIMEOUT);
}
}

static CURLcode cr_exp100_read(struct Curl_easy *data,
struct Curl_creader *reader,
char *buf, size_t blen,
size_t *nread, bool *eos)
{
struct cr_exp100_ctx *ctx = reader->ctx;
timediff_t ms;

switch(ctx->state) {
case EXP100_SENDING_REQUEST:
/* We are now waiting for a reply from the server or
* a timeout on our side */
DEBUGF(infof(data, "cr_exp100_read, start AWAITING_CONTINUE"));
ctx->state = EXP100_AWAITING_CONTINUE;
ctx->start = Curl_now();
Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
data->req.keepon &= ~KEEP_SEND;
data->req.keepon |= KEEP_SEND_TIMED;
*nread = 0;
*eos = FALSE;
return CURLE_OK;
case EXP100_FAILED:
DEBUGF(infof(data, "cr_exp100_read, expectation failed, error"));
*nread = 0;
*eos = FALSE;
return CURLE_READ_ERROR;
case EXP100_AWAITING_CONTINUE:
ms = Curl_timediff(Curl_now(), ctx->start);
if(ms < data->set.expect_100_timeout) {
DEBUGF(infof(data, "cr_exp100_read, AWAITING_CONTINUE, not expired"));
data->req.keepon &= ~KEEP_SEND;
data->req.keepon |= KEEP_SEND_TIMED;
*nread = 0;
*eos = FALSE;
return CURLE_OK;
}
/* we've waited long enough, continue anyway */
http_exp100_continue(data, reader);
infof(data, "Done waiting for 100-continue");
FALLTHROUGH();
default:
DEBUGF(infof(data, "cr_exp100_read, pass through"));
return Curl_creader_read(data, reader->next, buf, blen, nread, eos);
}
}

static void cr_exp100_done(struct Curl_easy *data,
struct Curl_creader *reader, int premature)
{
struct cr_exp100_ctx *ctx = reader->ctx;
ctx->state = premature? EXP100_FAILED : EXP100_SEND_DATA;
data->req.keepon &= ~KEEP_SEND_TIMED;
Curl_expire_done(data, EXPIRE_100_TIMEOUT);
}

static const struct Curl_crtype cr_exp100 = {
"cr-exp100",
Curl_creader_def_init,
cr_exp100_read,
Curl_creader_def_close,
Curl_creader_def_needs_rewind,
Curl_creader_def_total_length,
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
cr_exp100_done,
sizeof(struct cr_exp100_ctx)
};

static CURLcode http_exp100_add_reader(struct Curl_easy *data)
{
struct Curl_creader *reader = NULL;
CURLcode result;

result = Curl_creader_create(&reader, data, &cr_exp100,
CURL_CR_PROTOCOL);
if(!result)
result = Curl_creader_add(data, reader);
if(!result) {
struct cr_exp100_ctx *ctx = reader->ctx;
ctx->state = EXP100_SENDING_REQUEST;
}

if(result && reader)
Curl_creader_free(data, reader);
return result;
}

void Curl_http_exp100_got100(struct Curl_easy *data)
{
struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
if(r)
http_exp100_continue(data, r);
}

static bool http_exp100_is_waiting(struct Curl_easy *data)
{
struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
if(r) {
struct cr_exp100_ctx *ctx = r->ctx;
return (ctx->state == EXP100_AWAITING_CONTINUE);
}
return FALSE;
}

static void http_exp100_send_anyway(struct Curl_easy *data)
{
struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
if(r)
http_exp100_continue(data, r);
}

bool Curl_http_exp100_is_selected(struct Curl_easy *data)
{
struct Curl_creader *r = Curl_creader_get_by_type(data, &cr_exp100);
return r? TRUE : FALSE;
}

#endif /* CURL_DISABLE_HTTP */
@@ -176,6 +176,9 @@ CURLcode Curl_http_auth_act(struct Curl_easy *data);
version. This count includes CONNECT response headers. */
#define MAX_HTTP_RESP_HEADER_SIZE (300*1024)

bool Curl_http_exp100_is_selected(struct Curl_easy *data);
void Curl_http_exp100_got100(struct Curl_easy *data);

#endif /* CURL_DISABLE_HTTP */

/****************************************************************************

@@ -644,6 +644,7 @@ const struct Curl_crtype Curl_httpchunk_encoder = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
Curl_creader_def_done,
sizeof(struct chunked_reader)
};

@@ -2106,6 +2106,7 @@ static const struct Curl_crtype cr_mime = {
cr_mime_resume_from,
cr_mime_rewind,
cr_mime_unpause,
Curl_creader_def_done,
sizeof(struct cr_mime_ctx)
};

@@ -134,8 +134,6 @@ void Curl_req_hard_reset(struct SingleRequest *req, struct Curl_easy *data)
req->offset = 0;
req->httpcode = 0;
req->keepon = 0;
req->start100 = t0;
req->exp100 = EXP100_SEND_DATA;
req->upgr101 = UPGR101_INIT;
req->timeofdoc = 0;
req->bodywrites = 0;

@@ -241,16 +239,6 @@ static CURLcode req_send_buffer_flush(struct Curl_easy *data)
Curl_bufq_skip(&data->req.sendbuf, nwritten);
if(hds_len) {
data->req.sendbuf_hds_len -= CURLMIN(hds_len, nwritten);
if(!data->req.sendbuf_hds_len) {
/* all request headers sent */
if(data->req.exp100 == EXP100_SENDING_REQUEST) {
/* We are now waiting for a reply from the server or
* a timeout on our side */
data->req.exp100 = EXP100_AWAITING_CONTINUE;
data->req.start100 = Curl_now();
Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
}
}
}
/* leave if we could not send all. Maybe network blocking or
* speed limits on transfer */

@@ -264,11 +252,9 @@ static CURLcode req_set_upload_done(struct Curl_easy *data)
{
DEBUGASSERT(!data->req.upload_done);
data->req.upload_done = TRUE;
data->req.keepon &= ~KEEP_SEND; /* we're done sending */
data->req.keepon &= ~(KEEP_SEND|KEEP_SEND_TIMED); /* we're done sending */

/* FIXME: http specific stuff, need to go somewhere else */
data->req.exp100 = EXP100_SEND_DATA;
Curl_expire_done(data, EXPIRE_100_TIMEOUT);
Curl_creader_done(data, data->req.upload_aborted);

if(data->req.upload_aborted) {
if(data->req.writebytecount)

@@ -384,10 +370,8 @@ CURLcode Curl_req_send_more(struct Curl_easy *data)
{
CURLcode result;

/* Fill our send buffer if more from client can be read and
* we are not in a "expect-100" situation. */
if(!data->req.eos_read && !Curl_bufq_is_full(&data->req.sendbuf) &&
(data->req.exp100 == EXP100_SEND_DATA)) {
/* Fill our send buffer if more from client can be read. */
if(!data->req.eos_read && !Curl_bufq_is_full(&data->req.sendbuf)) {
ssize_t nread = Curl_bufq_sipn(&data->req.sendbuf, 0,
add_from_client, data, &result);
if(nread < 0 && result != CURLE_AGAIN)

@@ -81,8 +81,6 @@ struct SingleRequest {
int httpcode; /* error code from the 'HTTP/1.? XXX' or
'RTSP/1.? XXX' line */
int keepon;
struct curltime start100; /* time stamp to wait for the 100 code from */
enum expect100 exp100; /* expect 100 continue state */
enum upgrade101 upgr101; /* 101 upgrade state */

/* Client Writer stack, handles transfer- and content-encodings, protocol

@@ -555,8 +555,6 @@ static CURLcode rtsp_do(struct Curl_easy *data, bool *done)
goto out;
}
}

data->state.expect100header = FALSE; /* RTSP posts are simple/small */
}
else if(rtspreq == RTSPREQ_GET_PARAMETER) {
/* Check for an empty GET_PARAMETER (heartbeat) request */
33 lib/sendf.c

@@ -579,6 +579,14 @@ CURLcode Curl_creader_def_unpause(struct Curl_easy *data,
return CURLE_OK;
}

void Curl_creader_def_done(struct Curl_easy *data,
struct Curl_creader *reader, int premature)
{
(void)data;
(void)reader;
(void)premature;
}

struct cr_in_ctx {
struct Curl_creader super;
curl_read_callback read_cb;

@@ -840,6 +848,7 @@ static const struct Curl_crtype cr_in = {
cr_in_resume_from,
cr_in_rewind,
Curl_creader_def_unpause,
Curl_creader_def_done,
sizeof(struct cr_in_ctx)
};

@@ -990,6 +999,7 @@ static const struct Curl_crtype cr_lc = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
Curl_creader_def_done,
sizeof(struct cr_lc_ctx)
};

@@ -1154,6 +1164,7 @@ static const struct Curl_crtype cr_null = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
Curl_creader_def_done,
sizeof(struct Curl_creader)
};

@@ -1250,6 +1261,7 @@ static const struct Curl_crtype cr_buf = {
cr_buf_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
Curl_creader_def_done,
sizeof(struct cr_buf_ctx)
};

@@ -1307,3 +1319,24 @@ CURLcode Curl_creader_unpause(struct Curl_easy *data)
}
return result;
}

void Curl_creader_done(struct Curl_easy *data, int premature)
{
struct Curl_creader *reader = data->req.reader_stack;
while(reader) {
reader->crt->done(data, reader, premature);
reader = reader->next;
}
}

struct Curl_creader *Curl_creader_get_by_type(struct Curl_easy *data,
const struct Curl_crtype *crt)
{
struct Curl_creader *r;
for(r = data->req.reader_stack; r; r = r->next) {
if(r->crt == crt)
return r;
}
return NULL;

}
17 lib/sendf.h

@@ -208,6 +208,8 @@ struct Curl_crtype {
struct Curl_creader *reader, curl_off_t offset);
CURLcode (*rewind)(struct Curl_easy *data, struct Curl_creader *reader);
CURLcode (*unpause)(struct Curl_easy *data, struct Curl_creader *reader);
void (*done)(struct Curl_easy *data,
struct Curl_creader *reader, int premature);
size_t creader_size; /* sizeof() allocated struct Curl_creader */
};

@@ -256,6 +258,8 @@ CURLcode Curl_creader_def_rewind(struct Curl_easy *data,
struct Curl_creader *reader);
CURLcode Curl_creader_def_unpause(struct Curl_easy *data,
struct Curl_creader *reader);
void Curl_creader_def_done(struct Curl_easy *data,
struct Curl_creader *reader, int premature);

/**
* Convenience method for calling `reader->do_read()` that

@@ -361,6 +365,19 @@ CURLcode Curl_creader_resume_from(struct Curl_easy *data, curl_off_t offset);
*/
CURLcode Curl_creader_unpause(struct Curl_easy *data);

/**
* Tell all client readers that they are done.
*/
void Curl_creader_done(struct Curl_easy *data, int premature);

/**
* Look up an installed client reader on `data` by its type.
* @return first reader with that type or NULL
*/
struct Curl_creader *Curl_creader_get_by_type(struct Curl_easy *data,
const struct Curl_crtype *crt);

/**
* Set the client reader to provide 0 bytes, immediate EOS.
*/
@@ -1923,6 +1923,7 @@ static const struct Curl_crtype cr_eob = {
Curl_creader_def_resume_from,
Curl_creader_def_rewind,
Curl_creader_def_unpause,
Curl_creader_def_done,
sizeof(struct cr_eob_ctx)
};

@@ -463,7 +463,8 @@ CURLcode Curl_readwrite(struct Curl_easy *data)
}

/* If we still have writing to do, we check if we have a writable socket. */
if((k->keepon & KEEP_SEND) && (select_bits & CURL_CSELECT_OUT)) {
if(((k->keepon & KEEP_SEND) && (select_bits & CURL_CSELECT_OUT)) ||
(k->keepon & KEEP_SEND_TIMED)) {
/* write */

result = readwrite_upload(data, &didwhat);

@@ -476,31 +477,6 @@ CURLcode Curl_readwrite(struct Curl_easy *data)

now = Curl_now();
if(!didwhat) {
/* no read no write, this is a timeout? */
if(k->exp100 == EXP100_AWAITING_CONTINUE) {
/* This should allow some time for the header to arrive, but only a
very short time as otherwise it'll be too much wasted time too
often. */

/* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":

Therefore, when a client sends this header field to an origin server
(possibly via a proxy) from which it has never seen a 100 (Continue)
status, the client SHOULD NOT wait for an indefinite period before
sending the request body.

*/

timediff_t ms = Curl_timediff(now, k->start100);
if(ms >= data->set.expect_100_timeout) {
/* we've waited long enough, continue anyway */
k->exp100 = EXP100_SEND_DATA;
k->keepon |= KEEP_SEND;
Curl_expire_done(data, EXPIRE_100_TIMEOUT);
infof(data, "Done waiting for 100-continue");
}
}

result = Curl_conn_ev_data_idle(data);
if(result)
goto out;

@@ -1172,36 +1148,8 @@ void Curl_xfer_setup(
if(sockindex != -1)
k->keepon |= KEEP_RECV;

if(writesockindex != -1) {
/* HTTP 1.1 magic:

Even if we require a 100-return code before uploading data, we might
need to write data before that since the REQUEST may not have been
finished sent off just yet.

Thus, we must check if the request has been sent before we set the
state info where we wait for the 100-return code
*/
if((data->state.expect100header) &&
(conn->handler->protocol&PROTO_FAMILY_HTTP)) {
/* wait with write until we either got 100-continue or a timeout */
k->exp100 = EXP100_AWAITING_CONTINUE;
k->start100 = Curl_now();

/* Set a timeout for the multi interface. Add the inaccuracy margin so
that we don't fire slightly too early and get denied to run. */
Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
}
else {
if(data->state.expect100header)
/* when we've sent off the rest of the headers, we must await a
100-continue but first finish sending the request */
k->exp100 = EXP100_SENDING_REQUEST;

/* enable the write bit when we're not waiting for continue */
k->keepon |= KEEP_SEND;
}
} /* if(writesockindex != -1) */
if(writesockindex != -1)
k->keepon |= KEEP_SEND;
} /* if(k->getheader || !data->req.no_body) */

}

@@ -3898,7 +3898,6 @@ CURLcode Curl_init_do(struct Curl_easy *data, struct connectdata *conn)
}

data->state.done = FALSE; /* *_done() is not called yet */
data->state.expect100header = FALSE;

if(data->req.no_body)
/* in HTTP lingo, no body means using the HEAD request... */

@@ -579,6 +579,14 @@ struct hostname {
#define KEEP_RECV_PAUSE (1<<4) /* reading is paused */
#define KEEP_SEND_PAUSE (1<<5) /* writing is paused */

/* KEEP_SEND_TIMED is set when the transfer should attempt sending
* at timer (or other) events. A transfer waiting on a timer will
* remove KEEP_SEND to suppress POLLOUTs of the connection.
* Adding KEEP_SEND_TIMED will then attempt to send whenever the transfer
* enters the "readwrite" loop, e.g. when a timer fires.
* This is used in HTTP for 'Expect: 100-continue' waiting. */
#define KEEP_SEND_TIMED (1<<6)

#define KEEP_RECVBITS (KEEP_RECV | KEEP_RECV_HOLD | KEEP_RECV_PAUSE)
#define KEEP_SENDBITS (KEEP_SEND | KEEP_SEND_HOLD | KEEP_SEND_PAUSE)

@@ -1364,7 +1372,6 @@ struct UrlState {
BIT(authproblem); /* TRUE if there's some problem authenticating */
/* set after initial USER failure, to prevent an authentication loop */
BIT(wildcardmatch); /* enable wildcard matching */
BIT(expect100header); /* TRUE if we added Expect: 100-continue */
BIT(disableexpect); /* TRUE if Expect: is disabled due to a previous
417 response */
BIT(use_range);

@@ -46,6 +46,7 @@ class TestUpload:
env.make_data_file(indir=env.gen_dir, fname="data-63k", fsize=63*1024)
env.make_data_file(indir=env.gen_dir, fname="data-64k", fsize=64*1024)
env.make_data_file(indir=env.gen_dir, fname="data-100k", fsize=100*1024)
env.make_data_file(indir=env.gen_dir, fname="data-1m+", fsize=(1024*1024)+1)
env.make_data_file(indir=env.gen_dir, fname="data-10m", fsize=10*1024*1024)
httpd.clear_extra_configs()
httpd.reload()

@@ -501,3 +502,29 @@ class TestUpload:
up_speed = r.stats[0]['speed_upload']
assert (speed_limit * 0.5) <= up_speed <= (speed_limit * 1.5), f'{r.stats[0]}'

# upload larger data, triggering "Expect: 100-continue" code paths
@pytest.mark.parametrize("proto", ['http/1.1'])
def test_07_60_upload_exp100(self, env: Env, httpd, nghttpx, repeat, proto):
fdata = os.path.join(env.gen_dir, 'data-1m+')
read_delay = 1
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-0]'\
f'&read_delay={read_delay}s'
r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto, extra_args=[
'--expect100-timeout', f'{read_delay+1}'
])
r.check_stats(count=1, http_status=200, exitcode=0)

# upload larger data, triggering "Expect: 100-continue" code paths
@pytest.mark.parametrize("proto", ['http/1.1'])
def test_07_61_upload_exp100_timeout(self, env: Env, httpd, nghttpx, repeat, proto):
fdata = os.path.join(env.gen_dir, 'data-1m+')
read_delay = 2
curl = CurlClient(env=env)
url = f'https://{env.authority_for(env.domain1, proto)}/curltest/put?id=[0-0]'\
f'&read_delay={read_delay}s'
r = curl.http_put(urls=[url], fdata=fdata, alpn_proto=proto, extra_args=[
'--expect100-timeout', f'{read_delay-1}'
])
r.check_stats(count=1, http_status=200, exitcode=0)

@@ -425,7 +425,7 @@ static int curltest_put_handler(request_rec *r)
apr_off_t rbody_len = 0;
const char *s_rbody_len;
const char *request_id = "none";
apr_time_t chunk_delay = 0;
apr_time_t read_delay = 0, chunk_delay = 0;
apr_array_header_t *args = NULL;
long l;
int i;

@@ -450,6 +450,12 @@ static int curltest_put_handler(request_rec *r)
request_id = val;
continue;
}
else if(!strcmp("read_delay", arg)) {
rv = duration_parse(&read_delay, val, "s");
if(APR_SUCCESS == rv) {
continue;
}
}
else if(!strcmp("chunk_delay", arg)) {
rv = duration_parse(&chunk_delay, val, "s");
if(APR_SUCCESS == rv) {

@@ -478,6 +484,9 @@ static int curltest_put_handler(request_rec *r)
ct = apr_table_get(r->headers_in, "content-type");
ap_set_content_type(r, ct? ct : "text/plain");

if(read_delay) {
apr_sleep(read_delay);
}
bb = apr_brigade_create(r->pool, c->bucket_alloc);
/* copy any request body into the response */
if((rv = ap_setup_client_block(r, REQUEST_CHUNKED_DECHUNK))) goto cleanup;