Commit 14bcea074a:

- update client reader documentation
- client reader, add rewind capabilities
- tell creader to rewind on next start
- Curl_client_reset() will keep reader for future rewind if requested
- add Curl_client_cleanup() for freeing all resources independent of rewinds
- add Curl_client_start() to trigger rewinds
- move rewind code from multi.c to sendf.c and make it part of "cr-in"'s
  implementation
- http, move the "resume_from" handling into the client readers
- the setup of an HTTP request is reshuffled to follow:
  * determine method, target, auth negotiation
  * install the client reader(s) for the request, including crlf conversions
    and "chunked" encoding
  * apply ranges to client reader
  * concat request headers, upgrades, cookies, etc.
  * complete request by determining Content-Length of installed readers in
    combination with method
  * send
- add methods for client readers to
  * return the overall length they will generate (or -1 when unknown)
  * return the amount of data on the CLIENT level, so that expect-100 can
    decide if it wants to apply itself
  * set a "resume_from" offset or fail if unsupported
- struct HTTP has become largely empty now
- rename `Client_reader_*` to `Curl_creader_*`

Closes #13026
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"

#include "urldata.h"
#include "cfilters.h"
#include "dynbuf.h"
#include "doh.h"
#include "multiif.h"
#include "progress.h"
#include "request.h"
#include "sendf.h"
#include "transfer.h"
#include "url.h"

/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"

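/* Initialize `req` to a clean, all-zero state before its first use. */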
CURLcode Curl_req_init(struct SingleRequest *req)
{
  memset(req, 0, sizeof(*req));
  return CURLE_OK;
}

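/* Prepare the request for (re)sending: record the start time, let the
 * client readers (re)start and make sure the send buffer exists and is
 * sized according to the current upload_buffer_size setting. */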
CURLcode Curl_req_start(struct SingleRequest *req,
                        struct Curl_easy *data)
{
  CURLcode result;

  req->start = Curl_now();
  result = Curl_client_start(data);
  if(result)
    return result;

  if(!req->sendbuf_init) {
    Curl_bufq_init2(&req->sendbuf, data->set.upload_buffer_size, 1,
                    BUFQ_OPT_SOFT_LIMIT);
    req->sendbuf_init = TRUE;
  }
  else {
    Curl_bufq_reset(&req->sendbuf);
    if(data->set.upload_buffer_size != req->sendbuf.chunk_size) {
      Curl_bufq_free(&req->sendbuf);
      Curl_bufq_init2(&req->sendbuf, data->set.upload_buffer_size, 1,
                      BUFQ_OPT_SOFT_LIMIT);
    }
  }

  return CURLE_OK;
}

static CURLcode req_flush(struct Curl_easy *data);

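/* Called when the request is over. Flush any remaining send buffer data
 * unless the request was aborted, then reset the client readers. */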
CURLcode Curl_req_done(struct SingleRequest *req,
                       struct Curl_easy *data, bool aborted)
{
  (void)req;
  if(!aborted)
    (void)req_flush(data);
  Curl_client_reset(data);
  return CURLE_OK;
}

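/* Reset the request state for reuse on a follow-up or retried request,
 * keeping allocated resources such as the send buffer around. */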
void Curl_req_reset(struct SingleRequest *req, struct Curl_easy *data)
{
  struct curltime t0 = {0, 0};

  /* This is a bit ugly. `req->p` is a union and we assume we can
   * free this safely without leaks. */
  Curl_safefree(req->p.http);
  Curl_safefree(req->newurl);
  Curl_client_reset(data);
  if(req->sendbuf_init)
    Curl_bufq_reset(&req->sendbuf);

#ifndef CURL_DISABLE_DOH
  if(req->doh) {
    Curl_close(&req->doh->probe[0].easy);
    Curl_close(&req->doh->probe[1].easy);
  }
#endif
  /* Can no longer memset() this struct as we need to keep some state */
  req->size = -1;
  req->maxdownload = -1;
  req->bytecount = 0;
  req->writebytecount = 0;
  req->start = t0;
  req->headerbytecount = 0;
  req->allheadercount = 0;
  req->deductheadercount = 0;
  req->headerline = 0;
  req->offset = 0;
  req->httpcode = 0;
  req->keepon = 0;
  req->start100 = t0;
  req->exp100 = EXP100_SEND_DATA;
  req->upgr101 = UPGR101_INIT;
  req->timeofdoc = 0;
  req->bodywrites = 0;
  req->location = NULL;
  req->newurl = NULL;
#ifndef CURL_DISABLE_COOKIES
  req->setcookies = 0;
#endif
  req->header = FALSE;
  req->content_range = FALSE;
  req->download_done = FALSE;
  req->eos_written = FALSE;
  req->eos_read = FALSE;
  req->upload_done = FALSE;
  req->upload_aborted = FALSE;
  req->ignorebody = FALSE;
  req->http_bodyless = FALSE;
  req->chunk = FALSE;
  req->ignore_cl = FALSE;
  req->upload_chunky = FALSE;
  req->getheader = FALSE;
  req->forbidchunk = FALSE;
  req->no_body = data->set.opt_no_body;
  req->authneg = FALSE;
}

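/* Free all resources held by the request, including the send buffer,
 * client readers and any DoH probe handles. */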
void Curl_req_free(struct SingleRequest *req, struct Curl_easy *data)
{
  /* This is a bit ugly. `req->p` is a union and we assume we can
   * free this safely without leaks. */
  Curl_safefree(req->p.http);
  Curl_safefree(req->newurl);
  if(req->sendbuf_init)
    Curl_bufq_free(&req->sendbuf);
  Curl_client_cleanup(data);

#ifndef CURL_DISABLE_DOH
  if(req->doh) {
    Curl_close(&req->doh->probe[0].easy);
    Curl_close(&req->doh->probe[1].easy);
    Curl_dyn_free(&req->doh->probe[0].serverdoh);
    Curl_dyn_free(&req->doh->probe[1].serverdoh);
    curl_slist_free_all(req->doh->headers);
    Curl_safefree(req->doh);
  }
#endif
}

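/* Send `blen` bytes from `buf`, of which the first `hds_len` bytes are
 * request headers. Applies the max send speed limit to the body part,
 * reports header/body bytes to the debug callback and updates the upload
 * progress counter. The number of bytes actually sent is returned in
 * `*pnwritten`. */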
static CURLcode xfer_send(struct Curl_easy *data,
                          const char *buf, size_t blen,
                          size_t hds_len, size_t *pnwritten)
{
  CURLcode result = CURLE_OK;

  *pnwritten = 0;
#ifdef CURLDEBUG
  {
    /* Allow debug builds to override this logic to force short initial
       sends */
    char *p = getenv("CURL_SMALLREQSEND");
    if(p) {
      size_t altsize = (size_t)strtoul(p, NULL, 10);
      if(altsize && altsize < blen)
        blen = altsize;
    }
  }
#endif
  /* Make sure this does not send more body bytes than what the max send
     speed says. The headers do not count toward the max speed. */
  if(data->set.max_send_speed) {
    size_t body_bytes = blen - hds_len;
    if((curl_off_t)body_bytes > data->set.max_send_speed)
      blen = hds_len + (size_t)data->set.max_send_speed;
  }

  result = Curl_xfer_send(data, buf, blen, pnwritten);
  if(!result && *pnwritten) {
    if(hds_len)
      Curl_debug(data, CURLINFO_HEADER_OUT, (char *)buf,
                 CURLMIN(hds_len, *pnwritten));
    if(*pnwritten > hds_len) {
      size_t body_len = *pnwritten - hds_len;
      Curl_debug(data, CURLINFO_DATA_OUT, (char *)buf + hds_len, body_len);
      data->req.writebytecount += body_len;
      Curl_pgrsSetUploadCounter(data, data->req.writebytecount);
    }
  }
  return result;
}

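/* Try to send as much of the buffered request data as the connection
 * accepts. When the last header byte has gone out and we are in an
 * expect-100 exchange, start the 100-continue timeout. */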
static CURLcode req_send_buffer_flush(struct Curl_easy *data)
{
  CURLcode result = CURLE_OK;
  const unsigned char *buf;
  size_t blen;

  while(Curl_bufq_peek(&data->req.sendbuf, &buf, &blen)) {
    size_t nwritten, hds_len = CURLMIN(data->req.sendbuf_hds_len, blen);
    result = xfer_send(data, (const char *)buf, blen, hds_len, &nwritten);
    if(result)
      break;

    Curl_bufq_skip(&data->req.sendbuf, nwritten);
    if(hds_len) {
      data->req.sendbuf_hds_len -= CURLMIN(hds_len, nwritten);
      if(!data->req.sendbuf_hds_len) {
        /* all request headers sent */
        if(data->req.exp100 == EXP100_SENDING_REQUEST) {
          /* We are now waiting for a reply from the server or
           * a timeout on our side */
          data->req.exp100 = EXP100_AWAITING_CONTINUE;
          data->req.start100 = Curl_now();
          Curl_expire(data, data->set.expect_100_timeout, EXPIRE_100_TIMEOUT);
        }
      }
    }
    /* leave if we could not send all. Maybe network blocking or
     * speed limits on transfer */
    if(nwritten < blen)
      break;
  }
  return result;
}

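/* Mark the upload side of the request as finished, clear KEEP_SEND and
 * log how much was sent (or that the upload was aborted). */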
static CURLcode req_set_upload_done(struct Curl_easy *data)
{
  DEBUGASSERT(!data->req.upload_done);
  data->req.upload_done = TRUE;
  data->req.keepon &= ~KEEP_SEND; /* we're done sending */

  /* FIXME: http specific stuff, need to go somewhere else */
  data->req.exp100 = EXP100_SEND_DATA;
  Curl_expire_done(data, EXPIRE_100_TIMEOUT);

  if(data->req.upload_aborted) {
    if(data->req.writebytecount)
      infof(data, "abort upload after having sent %" CURL_FORMAT_CURL_OFF_T
            " bytes", data->req.writebytecount);
    else
      infof(data, "abort upload");
  }
  else if(data->req.writebytecount)
    infof(data, "upload completely sent off: %" CURL_FORMAT_CURL_OFF_T
          " bytes", data->req.writebytecount);
  else
    infof(data, "We are completely uploaded and fine");

  return Curl_xfer_send_close(data);
}

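/* Flush pending send buffer data. Returns CURLE_AGAIN if not everything
 * could be sent right now. When the buffer drains and the client readers
 * have delivered their last byte, the upload is marked as done. */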
static CURLcode req_flush(struct Curl_easy *data)
{
  CURLcode result;

  if(!data || !data->conn)
    return CURLE_FAILED_INIT;

  if(!Curl_bufq_is_empty(&data->req.sendbuf)) {
    result = req_send_buffer_flush(data);
    if(result)
      return result;
    if(!Curl_bufq_is_empty(&data->req.sendbuf)) {
      return CURLE_AGAIN;
    }
  }

  if(!data->req.upload_done && data->req.eos_read &&
     Curl_bufq_is_empty(&data->req.sendbuf)) {
    return req_set_upload_done(data);
  }
  return CURLE_OK;
}

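/* bufq reader callback: pull more request body bytes from the client
 * readers into the send buffer and remember when the end of the data
 * has been reached. */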
static ssize_t add_from_client(void *reader_ctx,
                               unsigned char *buf, size_t buflen,
                               CURLcode *err)
{
  struct Curl_easy *data = reader_ctx;
  size_t nread;
  bool eos;

  *err = Curl_client_read(data, (char *)buf, buflen, &nread, &eos);
  if(*err)
    return -1;
  if(eos)
    data->req.eos_read = TRUE;
  return (ssize_t)nread;
}

#ifndef USE_HYPER

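/* Append `blen` bytes to the send buffer, of which `hds_len` bytes are
 * request headers. The buffer uses a soft limit, so it accepts all of it. */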
static CURLcode req_send_buffer_add(struct Curl_easy *data,
                                    const char *buf, size_t blen,
                                    size_t hds_len)
{
  CURLcode result = CURLE_OK;
  ssize_t n;
  n = Curl_bufq_write(&data->req.sendbuf,
                      (const unsigned char *)buf, blen, &result);
  if(n < 0)
    return result;
  /* We rely on a SOFTLIMIT on sendbuf, so it can take all data in */
  DEBUGASSERT((size_t)n == blen);
  data->req.sendbuf_hds_len += hds_len;
  return CURLE_OK;
}

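/* Hand over a constructed request to the send machinery: buffer it
 * (accounted as header bytes) and attempt to send it right away. */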
CURLcode Curl_req_send(struct Curl_easy *data, struct dynbuf *buf)
{
  CURLcode result;

  if(!data || !data->conn)
    return CURLE_FAILED_INIT;

  /* We always buffer and send from there. The reason is that on
   * blocking, we can retry using the same memory address. This is
   * important for TLS libraries that expect this.
   * We *could* optimize for non-TLS transfers, but that would mean
   * separate code paths and does not seem worth it. */
  result = req_send_buffer_add(data, Curl_dyn_ptr(buf), Curl_dyn_len(buf),
                               Curl_dyn_len(buf));
  if(result)
    return result;

  return Curl_req_send_more(data);
}
#endif /* !USE_HYPER */

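/* TRUE when buffered request data is waiting to be sent. */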
bool Curl_req_want_send(struct Curl_easy *data)
{
  return data->req.sendbuf_init && !Curl_bufq_is_empty(&data->req.sendbuf);
}

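/* TRUE once the upload side of the request has been completely sent. */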
bool Curl_req_done_sending(struct Curl_easy *data)
{
  if(data->req.upload_done) {
    DEBUGASSERT(Curl_bufq_is_empty(&data->req.sendbuf));
    return TRUE;
  }
  return FALSE;
}

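/* Continue sending the request: top up the send buffer from the client
 * readers (unless waiting for a 100-continue) and flush what we can. */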
CURLcode Curl_req_send_more(struct Curl_easy *data)
{
  CURLcode result;

  /* Fill our send buffer if more from client can be read and
   * we are not in an "expect-100" situation. */
  if(!data->req.eos_read && !Curl_bufq_is_full(&data->req.sendbuf) &&
     (data->req.exp100 == EXP100_SEND_DATA)) {
    ssize_t nread = Curl_bufq_sipn(&data->req.sendbuf, 0,
                                   add_from_client, data, &result);
    if(nread < 0 && result != CURLE_AGAIN)
      return result;
  }

  result = req_flush(data);
  if(result == CURLE_AGAIN)
    result = CURLE_OK;
  return result;
}

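/* Abort a not yet finished upload: drop any buffered data and mark the
 * upload as done/aborted. */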
CURLcode Curl_req_abort_sending(struct Curl_easy *data)
{
  if(!data->req.upload_done) {
    Curl_bufq_reset(&data->req.sendbuf);
    data->req.upload_aborted = TRUE;
    return req_set_upload_done(data);
  }
  return CURLE_OK;
}