/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"

#include "urldata.h"
#include "sendf.h"
#include "multiif.h"
#include "progress.h"
#include "timeval.h"
#include "curl_printf.h"

/* check rate limits within this many recent milliseconds, at minimum. */
#define MIN_RATE_LIMIT_PERIOD 3000

#ifndef CURL_DISABLE_PROGRESS_METER
/* Provide a string that is 2 + 1 + 2 + 1 + 2 = 8 letters long (plus the zero
   byte) */
static void time2str(char *r, curl_off_t seconds)
{
  curl_off_t h;
  if(seconds <= 0) {
    strcpy(r, "--:--:--");
    return;
  }
  h = seconds / CURL_OFF_T_C(3600);
  if(h <= CURL_OFF_T_C(99)) {
    curl_off_t m = (seconds - (h*CURL_OFF_T_C(3600))) / CURL_OFF_T_C(60);
    curl_off_t s = (seconds - (h*CURL_OFF_T_C(3600))) - (m*CURL_OFF_T_C(60));
    msnprintf(r, 9, "%2" CURL_FORMAT_CURL_OFF_T ":%02" CURL_FORMAT_CURL_OFF_T
              ":%02" CURL_FORMAT_CURL_OFF_T, h, m, s);
  }
  else {
    /* this is more than 99 hours, switch to a more suitable output
       format to fit within the limits. */
    curl_off_t d = seconds / CURL_OFF_T_C(86400);
    h = (seconds - (d*CURL_OFF_T_C(86400))) / CURL_OFF_T_C(3600);
    if(d <= CURL_OFF_T_C(999))
      msnprintf(r, 9, "%3" CURL_FORMAT_CURL_OFF_T
                "d %02" CURL_FORMAT_CURL_OFF_T "h", d, h);
    else
      msnprintf(r, 9, "%7" CURL_FORMAT_CURL_OFF_T "d", d);
  }
}
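
/* For instance (derived from the code above, illustrative only): 3725
   seconds renders as " 1:02:05", while 360000 seconds (100 hours) switches
   to the day format and renders as "  4d 04h"; both stay within the eight
   visible characters promised above. */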

/* The point of this function is to return a string of the input data, but
   never longer than 5 columns (+ one zero byte).
   Add suffix k, M, G when suitable... */
static char *max5data(curl_off_t bytes, char *max5)
{
#define ONE_KILOBYTE  CURL_OFF_T_C(1024)
#define ONE_MEGABYTE (CURL_OFF_T_C(1024) * ONE_KILOBYTE)
#define ONE_GIGABYTE (CURL_OFF_T_C(1024) * ONE_MEGABYTE)
#define ONE_TERABYTE (CURL_OFF_T_C(1024) * ONE_GIGABYTE)
#define ONE_PETABYTE (CURL_OFF_T_C(1024) * ONE_TERABYTE)

  if(bytes < CURL_OFF_T_C(100000))
    msnprintf(max5, 6, "%5" CURL_FORMAT_CURL_OFF_T, bytes);

  else if(bytes < CURL_OFF_T_C(10000) * ONE_KILOBYTE)
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "k", bytes/ONE_KILOBYTE);

  else if(bytes < CURL_OFF_T_C(100) * ONE_MEGABYTE)
    /* 'XX.XM' is good as long as we're less than 100 megs */
    msnprintf(max5, 6, "%2" CURL_FORMAT_CURL_OFF_T ".%0"
              CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE,
              (bytes%ONE_MEGABYTE) / (ONE_MEGABYTE/CURL_OFF_T_C(10)) );

  else if(bytes < CURL_OFF_T_C(10000) * ONE_MEGABYTE)
    /* 'XXXXM' is good until we're at 10000MB or above */
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "M", bytes/ONE_MEGABYTE);

  else if(bytes < CURL_OFF_T_C(100) * ONE_GIGABYTE)
    /* 10000 MB - 100 GB, we show it as XX.XG */
    msnprintf(max5, 6, "%2" CURL_FORMAT_CURL_OFF_T ".%0"
              CURL_FORMAT_CURL_OFF_T "G", bytes/ONE_GIGABYTE,
              (bytes%ONE_GIGABYTE) / (ONE_GIGABYTE/CURL_OFF_T_C(10)) );

  else if(bytes < CURL_OFF_T_C(10000) * ONE_GIGABYTE)
    /* up to 10000GB, display without decimal: XXXXG */
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "G", bytes/ONE_GIGABYTE);

  else if(bytes < CURL_OFF_T_C(10000) * ONE_TERABYTE)
    /* up to 10000TB, display without decimal: XXXXT */
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "T", bytes/ONE_TERABYTE);

  else
    /* up to 10000PB, display without decimal: XXXXP */
    msnprintf(max5, 6, "%4" CURL_FORMAT_CURL_OFF_T "P", bytes/ONE_PETABYTE);

  /* 16384 petabytes (16 exabytes) is the maximum a 64 bit unsigned number can
     hold, but our data type is signed so 8192PB will be the maximum. */

  return max5;
}
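
/* For instance (derived from the branches above, illustrative only): 1234
   bytes formats as " 1234", 2621440 bytes (2.5 MiB) as "2560k" and
   104857600 bytes (100 MiB) as " 100M"; always at most five characters plus
   the terminating zero. */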
#endif

/*

   New proposed interface, 9th of February 2000:

   pgrsStartNow() - sets start time
   pgrsSetDownloadSize(x) - known expected download size
   pgrsSetUploadSize(x) - known expected upload size
   pgrsSetDownloadCounter() - amount of data currently downloaded
   pgrsSetUploadCounter() - amount of data currently uploaded
   pgrsUpdate() - show progress
   pgrsDone() - transfer complete

*/
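
/* Illustrative sketch only (not an actual call site): the rough order in
   which a caller is expected to drive the interface listed above over the
   life of one transfer. The variable names are made up for the example; the
   real call sites live elsewhere in libcurl.

     Curl_pgrsStartNow(data);
     Curl_pgrsSetDownloadSize(data, expected_dl_bytes);  (or -1 if unknown)
     Curl_pgrsSetUploadSize(data, expected_ul_bytes);    (or -1 if unknown)
     while(transfer_is_running) {
       Curl_pgrsSetDownloadCounter(data, total_dl_so_far);
       Curl_pgrsSetUploadCounter(data, total_ul_so_far);
       Curl_pgrsUpdate(data);
     }
     Curl_pgrsDone(data);
*/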

int Curl_pgrsDone(struct Curl_easy *data)
{
  int rc;
  data->progress.lastshow = 0;
  rc = Curl_pgrsUpdate(data); /* the final (forced) update */
  if(rc)
    return rc;

  if(!(data->progress.flags & PGRS_HIDE) &&
     !data->progress.callback)
    /* only output if we don't use a progress callback and we're not
     * hidden */
    fprintf(data->set.err, "\n");

  data->progress.speeder_c = 0; /* reset the progress meter display */
  return 0;
}

/* reset the known transfer sizes */
void Curl_pgrsResetTransferSizes(struct Curl_easy *data)
{
  Curl_pgrsSetDownloadSize(data, -1);
  Curl_pgrsSetUploadSize(data, -1);
}

/*
 * Curl_pgrsTimeWas(). Store the timestamp time at the given label.
 */
void Curl_pgrsTimeWas(struct Curl_easy *data, timerid timer,
                      struct curltime timestamp)
{
  timediff_t *delta = NULL;

  switch(timer) {
  default:
  case TIMER_NONE:
    /* mistake filter */
    break;
  case TIMER_STARTOP:
    /* This is set at the start of a transfer */
    data->progress.t_startop = timestamp;
    break;
  case TIMER_STARTSINGLE:
    /* This is set at the start of each single transfer */
    data->progress.t_startsingle = timestamp;
    data->progress.is_t_startransfer_set = false;
    break;
  case TIMER_POSTQUEUE:
    /* Set when the transfer starts (after potentially having been brought
       back from the waiting queue). It needs to count from t_startop and not
       t_startsingle since the latter is reset when a connection is brought
       back from the pending queue. */
    data->progress.t_postqueue =
      Curl_timediff_us(timestamp, data->progress.t_startop);
    break;
  case TIMER_STARTACCEPT:
    data->progress.t_acceptdata = timestamp;
    break;
  case TIMER_NAMELOOKUP:
    delta = &data->progress.t_nslookup;
    break;
  case TIMER_CONNECT:
    delta = &data->progress.t_connect;
    break;
  case TIMER_APPCONNECT:
    delta = &data->progress.t_appconnect;
    break;
  case TIMER_PRETRANSFER:
    delta = &data->progress.t_pretransfer;
    break;
  case TIMER_STARTTRANSFER:
    delta = &data->progress.t_starttransfer;
    /* prevent updating t_starttransfer unless:
     * 1) this is the first time we're setting t_starttransfer
     * 2) a redirect has occurred since the last time t_starttransfer was set
     * This prevents repeated invocations of the function from incorrectly
     * changing the t_starttransfer time.
     */
    if(data->progress.is_t_startransfer_set) {
      return;
    }
    else {
      data->progress.is_t_startransfer_set = true;
      break;
    }
  case TIMER_POSTRANSFER:
    /* this is the normal end-of-transfer thing */
    break;
  case TIMER_REDIRECT:
    data->progress.t_redirect = Curl_timediff_us(timestamp,
                                                 data->progress.start);
    break;
  }
  if(delta) {
    timediff_t us = Curl_timediff_us(timestamp, data->progress.t_startsingle);
    if(us < 1)
      us = 1; /* make sure at least one microsecond passed */
    *delta += us;
  }
}
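
/* Note on the 'delta' accumulation above: for timers like TIMER_NAMELOOKUP
   the elapsed time from the current single transfer's t_startsingle up to
   the reported timestamp is added to the stored value rather than
   overwriting it. So when one operation involves several single transfers
   (for example after a redirect), these counters end up holding the sum of
   the per-transfer times, with a minimum of one microsecond per update. */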

/*
 * Curl_pgrsTime(). Store the current time at the given label. This fetches a
 * fresh "now" and returns it.
 *
 * @unittest: 1399
 */
struct curltime Curl_pgrsTime(struct Curl_easy *data, timerid timer)
{
  struct curltime now = Curl_now();

  Curl_pgrsTimeWas(data, timer, now);
  return now;
}

void Curl_pgrsStartNow(struct Curl_easy *data)
{
  data->progress.speeder_c = 0; /* reset the progress meter display */
  data->progress.start = Curl_now();
  data->progress.is_t_startransfer_set = false;
  data->progress.ul_limit_start = data->progress.start;
  data->progress.dl_limit_start = data->progress.start;
  data->progress.ul_limit_size = 0;
  data->progress.dl_limit_size = 0;
  data->progress.downloaded = 0;
  data->progress.uploaded = 0;
  /* clear all bits except HIDE and HEADERS_OUT */
  data->progress.flags &= PGRS_HIDE|PGRS_HEADERS_OUT;
  Curl_ratelimit(data, data->progress.start);
}

/*
 * This is used to handle speed limits, calculating how many milliseconds to
 * wait until we're back under the speed limit, if needed.
 *
 * The way it works is by having a "starting point" (time & amount of data
 * transferred by then) used in the speed computation, to be used instead of
 * the start of the transfer. This starting point is regularly moved as the
 * transfer goes on, to keep getting accurate values (instead of an average
 * over the entire transfer).
 *
 * This function takes the current amount of data transferred, the amount at
 * the starting point, the limit (in bytes/s), the time of the starting point
 * and the current time.
 *
 * Returns 0 when no waiting is needed (including the case where the starting
 * point should simply be reset to the current position), or the number of
 * milliseconds to wait to get back under the speed limit.
 */
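
/* Worked example (illustrative numbers only): with limit = 100000 bytes/sec
   and size = cursize - startsize = 300000 bytes, 'minimum' below becomes
   1000 * 300000 / 100000 = 3000 milliseconds. If only 1200 milliseconds have
   actually elapsed since 'start', the transfer is running ahead of the limit
   and the function returns 3000 - 1200 = 1800 ms of wait time; if 3000 ms or
   more have elapsed, it returns 0. */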
timediff_t Curl_pgrsLimitWaitTime(curl_off_t cursize,
                                  curl_off_t startsize,
                                  curl_off_t limit,
                                  struct curltime start,
                                  struct curltime now)
{
  curl_off_t size = cursize - startsize;
  timediff_t minimum;
  timediff_t actual;

  if(!limit || !size)
    return 0;

  /*
   * 'minimum' is the number of milliseconds 'size' should take to download to
   * stay below 'limit'.
   */
  if(size < CURL_OFF_T_MAX/1000)
    minimum = (timediff_t) (CURL_OFF_T_C(1000) * size / limit);
  else {
    minimum = (timediff_t) (size / limit);
    if(minimum < TIMEDIFF_T_MAX/1000)
      minimum *= 1000;
    else
      minimum = TIMEDIFF_T_MAX;
  }

  /*
   * 'actual' is the time in milliseconds it took to actually download the
   * last 'size' bytes.
   */
  actual = Curl_timediff_ceil(now, start);
  if(actual < minimum) {
    /* if it downloaded the data faster than the limit, make it wait the
       difference */
    return (minimum - actual);
  }

  return 0;
}

/*
 * Set the number of downloaded bytes so far.
 */
CURLcode Curl_pgrsSetDownloadCounter(struct Curl_easy *data, curl_off_t size)
{
  data->progress.downloaded = size;
  return CURLE_OK;
}
|
2016-08-17 02:32:02 +08:00
|
|
|
|
2018-03-15 23:43:00 +08:00
|
|
|
/*
|
|
|
|
* Update the timestamp and sizestamp to use for rate limit calculations.
|
|
|
|
*/
|
|
|
|
void Curl_ratelimit(struct Curl_easy *data, struct curltime now)
|
|
|
|
{
|
|
|
|
/* don't set a new stamp unless the time since last update is long enough */
|
2021-03-26 20:08:44 +08:00
|
|
|
if(data->set.max_recv_speed) {
|
2018-03-15 23:43:00 +08:00
|
|
|
if(Curl_timediff(now, data->progress.dl_limit_start) >=
|
|
|
|
MIN_RATE_LIMIT_PERIOD) {
|
|
|
|
data->progress.dl_limit_start = now;
|
|
|
|
data->progress.dl_limit_size = data->progress.downloaded;
|
|
|
|
}
|
|
|
|
}
|
2021-03-26 20:08:44 +08:00
|
|
|
if(data->set.max_send_speed) {
|
2018-03-15 23:43:00 +08:00
|
|
|
if(Curl_timediff(now, data->progress.ul_limit_start) >=
|
|
|
|
MIN_RATE_LIMIT_PERIOD) {
|
|
|
|
data->progress.ul_limit_start = now;
|
|
|
|
data->progress.ul_limit_size = data->progress.uploaded;
|
|
|
|
}
|
2016-08-17 02:32:02 +08:00
|
|
|
}
|
2000-02-15 07:15:08 +08:00
|
|
|
}
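
/*
 * The two halves of rate limiting cooperate: Curl_ratelimit() above moves
 * the (start time, start size) stamps forward at most once every
 * MIN_RATE_LIMIT_PERIOD milliseconds, while Curl_pgrsLimitWaitTime()
 * measures the average speed since the last stamp. The limit is therefore
 * enforced over a recent window rather than over the lifetime of the
 * transfer, so an initially slow connection does not earn the right to
 * burst far above the limit later on.
 */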

/*
 * Set the number of uploaded bytes so far.
 */
void Curl_pgrsSetUploadCounter(struct Curl_easy *data, curl_off_t size)
{
  data->progress.uploaded = size;
}

void Curl_pgrsSetDownloadSize(struct Curl_easy *data, curl_off_t size)
{
  if(size >= 0) {
    data->progress.size_dl = size;
    data->progress.flags |= PGRS_DL_SIZE_KNOWN;
  }
  else {
    data->progress.size_dl = 0;
    data->progress.flags &= ~PGRS_DL_SIZE_KNOWN;
  }
}

void Curl_pgrsSetUploadSize(struct Curl_easy *data, curl_off_t size)
{
  if(size >= 0) {
    data->progress.size_ul = size;
    data->progress.flags |= PGRS_UL_SIZE_KNOWN;
  }
  else {
    data->progress.size_ul = 0;
    data->progress.flags &= ~PGRS_UL_SIZE_KNOWN;
  }
}

/* returns the average speed in bytes / second */
static curl_off_t trspeed(curl_off_t size, /* number of bytes */
                          curl_off_t us)   /* microseconds */
{
  if(us < 1)
    return size * 1000000;
  else if(size < CURL_OFF_T_MAX/1000000)
    return (size * 1000000) / us;
  else if(us >= 1000000)
    return size / (us / 1000000);
  else
    return CURL_OFF_T_MAX;
}
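
/*
 * trspeed() branch sketch (numbers illustrative only): with size = 1000000
 * bytes and us = 2000000 microseconds, the common branch computes
 * (1000000 * 1000000) / 2000000 = 500000 bytes/second. The other branches
 * exist only to avoid overflowing curl_off_t: when 'size' is too large to be
 * multiplied by 1000000 safely, the division is done on whole seconds
 * instead, and if such a huge amount arrived in under a second the function
 * reports the maximum representable speed rather than risking wraparound.
 */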

/* returns TRUE if it's time to show the progress meter */
static bool progress_calc(struct Curl_easy *data, struct curltime now)
{
  bool timetoshow = FALSE;
  struct Progress * const p = &data->progress;

  /* The time spent so far (from the start) in microseconds */
  p->timespent = Curl_timediff_us(now, p->start);
  p->dlspeed = trspeed(p->downloaded, p->timespent);
  p->ulspeed = trspeed(p->uploaded, p->timespent);

  /* Calculations done at most once a second, unless end is reached */
  if(p->lastshow != now.tv_sec) {
    int countindex; /* number of seconds stored in the speeder array */
    int nowindex = p->speeder_c % CURR_TIME;
    p->lastshow = now.tv_sec;
    timetoshow = TRUE;

    /* Let's do the "current speed" thing, with the dl + ul speeds
       combined. Store the speed at entry 'nowindex'. */
    p->speeder[nowindex] = p->downloaded + p->uploaded;

    /* remember the exact time for this moment */
    p->speeder_time[nowindex] = now;

    /* advance our speeder_c counter, which is increased every time we get
       here and we expect it to never wrap as 2^32 is a lot of seconds! */
    p->speeder_c++;

    /* figure out how many index entries of data we have stored in our
       speeder array. With N_ENTRIES filled in, we have about N_ENTRIES-1
       seconds of transfer. Imagine: after one second we have filled in two
       entries, after two seconds we've filled in three entries etc. */
    countindex = ((p->speeder_c >= CURR_TIME) ? CURR_TIME : p->speeder_c) - 1;

    /* first of all, we don't do this if there are no counted seconds yet */
    if(countindex) {
      int checkindex;
      timediff_t span_ms;
      curl_off_t amount;

      /* Get the index position to compare with the 'nowindex' position.
         Get the oldest entry possible. While we have less than CURR_TIME
         entries, the first entry will remain the oldest. */
      checkindex = (p->speeder_c >= CURR_TIME) ? p->speeder_c % CURR_TIME : 0;

      /* Figure out the exact time for the time span */
      span_ms = Curl_timediff(now, p->speeder_time[checkindex]);
      if(0 == span_ms)
        span_ms = 1; /* at least one millisecond MUST have passed */

      /* Calculate the average speed over the last 'span_ms' milliseconds */
      amount = p->speeder[nowindex] - p->speeder[checkindex];

      if(amount > CURL_OFF_T_C(4294967) /* 0xffffffff/1000 */)
        /* the 'amount' value is bigger than would fit in 32 bits if
           multiplied with 1000, so we use double math for this */
        p->current_speed = (curl_off_t)
          ((double)amount/((double)span_ms/1000.0));
      else
        /* the 'amount' value is small enough to fit within 32 bits even
           when multiplied with 1000 */
        p->current_speed = amount*CURL_OFF_T_C(1000)/span_ms;
    }
    else
      /* the first second we use the average */
      p->current_speed = p->ulspeed + p->dlspeed;

  } /* Calculations end */
  return timetoshow;
}
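
/*
 * How the speeder window behaves (illustrative, assuming CURR_TIME is the
 * size of the ring buffer): one sample of "bytes transferred so far" is
 * stored per second, so comparing the newest sample with the oldest one
 * still kept in the ring yields the amount moved during roughly the last
 * CURR_TIME-1 seconds. For example, if the newest sample is 900000 bytes,
 * the oldest retained sample is 400000 bytes and the two samples are 5000 ms
 * apart, the "current speed" becomes (900000 - 400000) * 1000 / 5000 =
 * 100000 bytes/second, regardless of how fast the very beginning of the
 * transfer was.
 */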

#ifndef CURL_DISABLE_PROGRESS_METER
static void progress_meter(struct Curl_easy *data)
{
  char max5[6][10];
  curl_off_t dlpercen = 0;
  curl_off_t ulpercen = 0;
  curl_off_t total_percen = 0;
  curl_off_t total_transfer;
  curl_off_t total_expected_transfer;
  char time_left[10];
  char time_total[10];
  char time_spent[10];
  curl_off_t ulestimate = 0;
  curl_off_t dlestimate = 0;
  curl_off_t total_estimate;
  curl_off_t timespent =
    (curl_off_t)data->progress.timespent/1000000; /* seconds */

  if(!(data->progress.flags & PGRS_HEADERS_OUT)) {
    if(data->state.resume_from) {
      fprintf(data->set.err,
              "** Resuming transfer from byte position %"
              CURL_FORMAT_CURL_OFF_T "\n", data->state.resume_from);
    }
    fprintf(data->set.err,
            "  %% Total    %% Received %% Xferd  Average Speed   "
            "Time    Time     Time  Current\n"
            "                                 Dload  Upload   "
            "Total   Spent    Left  Speed\n");
    data->progress.flags |= PGRS_HEADERS_OUT; /* headers are shown */
  }

  /* Figure out the estimated time of arrival for the upload */
  if((data->progress.flags & PGRS_UL_SIZE_KNOWN) &&
     (data->progress.ulspeed > CURL_OFF_T_C(0))) {
    ulestimate = data->progress.size_ul / data->progress.ulspeed;

    if(data->progress.size_ul > CURL_OFF_T_C(10000))
      ulpercen = data->progress.uploaded /
        (data->progress.size_ul/CURL_OFF_T_C(100));
    else if(data->progress.size_ul > CURL_OFF_T_C(0))
      ulpercen = (data->progress.uploaded*100) /
        data->progress.size_ul;
  }

  /* ... and the download */
  if((data->progress.flags & PGRS_DL_SIZE_KNOWN) &&
     (data->progress.dlspeed > CURL_OFF_T_C(0))) {
    dlestimate = data->progress.size_dl / data->progress.dlspeed;

    if(data->progress.size_dl > CURL_OFF_T_C(10000))
      dlpercen = data->progress.downloaded /
        (data->progress.size_dl/CURL_OFF_T_C(100));
    else if(data->progress.size_dl > CURL_OFF_T_C(0))
      dlpercen = (data->progress.downloaded*100) /
        data->progress.size_dl;
  }

  /* Now figure out which of them is slower and use that one for the
     total estimate! */
  total_estimate = ulestimate > dlestimate ? ulestimate : dlestimate;

  /* create the three time strings */
  time2str(time_left, total_estimate > 0 ? (total_estimate - timespent) : 0);
  time2str(time_total, total_estimate);
  time2str(time_spent, timespent);

  /* Get the total amount of data expected to get transferred */
  total_expected_transfer =
    ((data->progress.flags & PGRS_UL_SIZE_KNOWN) ?
     data->progress.size_ul : data->progress.uploaded) +
    ((data->progress.flags & PGRS_DL_SIZE_KNOWN) ?
     data->progress.size_dl : data->progress.downloaded);

  /* We have transferred this much so far */
  total_transfer = data->progress.downloaded + data->progress.uploaded;

  /* Get the percentage of data transferred so far */
  if(total_expected_transfer > CURL_OFF_T_C(10000))
    total_percen = total_transfer /
      (total_expected_transfer/CURL_OFF_T_C(100));
  else if(total_expected_transfer > CURL_OFF_T_C(0))
    total_percen = (total_transfer*100) / total_expected_transfer;

  fprintf(data->set.err,
          "\r"
          "%3" CURL_FORMAT_CURL_OFF_T " %s "
          "%3" CURL_FORMAT_CURL_OFF_T " %s "
          "%3" CURL_FORMAT_CURL_OFF_T " %s %s %s %s %s %s %s",
          total_percen,  /* 3 letters */                /* total % */
          max5data(total_expected_transfer, max5[2]),   /* total size */
          dlpercen,      /* 3 letters */                /* rcvd % */
          max5data(data->progress.downloaded, max5[0]), /* rcvd size */
          ulpercen,      /* 3 letters */                /* xfer % */
          max5data(data->progress.uploaded, max5[1]),   /* xfer size */
          max5data(data->progress.dlspeed, max5[3]),    /* avrg dl speed */
          max5data(data->progress.ulspeed, max5[4]),    /* avrg ul speed */
          time_total,    /* 8 letters */                /* total time */
          time_spent,    /* 8 letters */                /* time spent */
          time_left,     /* 8 letters */                /* time left */
          max5data(data->progress.current_speed, max5[5])
    );

  /* we flush the output stream to make it appear as soon as possible */
  fflush(data->set.err);
}
#else
 /* progress meter disabled */
#define progress_meter(x) Curl_nop_stmt
#endif
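
/*
 * For reference, the header and one data line produced by progress_meter()
 * look roughly like this (values are made up; exact column alignment comes
 * from time2str() and max5data(), which emit short fixed-width strings):
 *
 *     % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current
 *                                    Dload  Upload   Total   Spent    Left  Speed
 *   100  1256k  100  1256k    0     0   834k      0  0:00:01  0:00:01 --:--:--  834k
 */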

/*
 * Curl_pgrsUpdate() returns 0 for success or the value returned by the
 * progress callback!
 */
int Curl_pgrsUpdate(struct Curl_easy *data)
{
  struct curltime now = Curl_now(); /* what time is it */
  bool showprogress = progress_calc(data, now);
  if(!(data->progress.flags & PGRS_HIDE)) {
    if(data->set.fxferinfo) {
      int result;
      /* There's a callback set, call that */
      Curl_set_in_callback(data, true);
      result = data->set.fxferinfo(data->set.progress_client,
                                   data->progress.size_dl,
                                   data->progress.downloaded,
                                   data->progress.size_ul,
                                   data->progress.uploaded);
      Curl_set_in_callback(data, false);
      if(result != CURL_PROGRESSFUNC_CONTINUE) {
        if(result)
          failf(data, "Callback aborted");
        return result;
      }
    }
    else if(data->set.fprogress) {
      int result;
      /* The older deprecated callback is set, call that */
      Curl_set_in_callback(data, true);
      result = data->set.fprogress(data->set.progress_client,
                                   (double)data->progress.size_dl,
                                   (double)data->progress.downloaded,
                                   (double)data->progress.size_ul,
                                   (double)data->progress.uploaded);
      Curl_set_in_callback(data, false);
      if(result != CURL_PROGRESSFUNC_CONTINUE) {
        if(result)
          failf(data, "Callback aborted");
        return result;
      }
    }

    if(showprogress)
      progress_meter(data);
  }

  return 0;
}
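
/*
 * From the application side, the modern callback invoked above is installed
 * with the public CURLOPT_XFERINFOFUNCTION option. A minimal sketch follows
 * (example code, not part of libcurl itself): returning
 * CURL_PROGRESSFUNC_CONTINUE lets libcurl keep its built-in progress meter
 * handling, while any other nonzero return value aborts the transfer.
 *
 *   static int my_xferinfo(void *clientp, curl_off_t dltotal,
 *                          curl_off_t dlnow, curl_off_t ultotal,
 *                          curl_off_t ulnow)
 *   {
 *     (void)clientp; (void)ultotal; (void)ulnow;
 *     fprintf(stderr, "down: %" CURL_FORMAT_CURL_OFF_T "/%"
 *             CURL_FORMAT_CURL_OFF_T "\n", dlnow, dltotal);
 *     return CURL_PROGRESSFUNC_CONTINUE;
 *   }
 *
 *   curl_easy_setopt(curl, CURLOPT_XFERINFOFUNCTION, my_xferinfo);
 *   curl_easy_setopt(curl, CURLOPT_XFERINFODATA, NULL); // becomes 'clientp'
 *   curl_easy_setopt(curl, CURLOPT_NOPROGRESS, 0L); // enable the callback
 */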