/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/

#include "curl_setup.h"

#include "urldata.h"
#include "strerror.h"
#include "cfilters.h"
#include "connect.h"
#include "url.h" /* for Curl_safefree() */
#include "sendf.h"
#include "sockaddr.h" /* required for Curl_sockaddr_storage */
#include "multiif.h"
#include "progress.h"
#include "select.h"
#include "warnless.h"

/* The last 3 #include files should be in this order */
#include "curl_printf.h"
#include "curl_memory.h"
#include "memdebug.h"

#ifndef ARRAYSIZE
#define ARRAYSIZE(A) (sizeof(A)/sizeof((A)[0]))
#endif

#ifdef UNITTESTS
/* used by unit2600.c */
void Curl_cf_def_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
  cf->connected = FALSE;
  if(cf->next)
    cf->next->cft->do_close(cf->next, data);
}
#endif
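
/* The Curl_cf_def_*() functions below are default implementations for
 * methods in `struct Curl_cftype`. Unless noted otherwise, they pass the
 * call on to the next filter in the chain, or return a safe fallback
 * when there is no next filter. */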

static void conn_report_connect_stats(struct Curl_easy *data,
                                      struct connectdata *conn);

void Curl_cf_def_get_host(struct Curl_cfilter *cf, struct Curl_easy *data,
                          const char **phost, const char **pdisplay_host,
                          int *pport)
{
  if(cf->next)
    cf->next->cft->get_host(cf->next, data, phost, pdisplay_host, pport);
  else {
    *phost = cf->conn->host.name;
    *pdisplay_host = cf->conn->host.dispname;
    *pport = cf->conn->primary.remote_port;
  }
}
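
/* Default adjust_pollset: a NOP. Filters may add sockets and/or change
 * POLLIN/POLLOUT flags on the passed `struct easy_pollset` (it holds up
 * to MAX_SOCKSPEREASYHANDLE sockets); when all flags of a socket are
 * removed, the socket itself is removed from the pollset. */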
void Curl_cf_def_adjust_pollset(struct Curl_cfilter *cf,
                                struct Curl_easy *data,
                                struct easy_pollset *ps)
{
  /* NOP */
  (void)cf;
  (void)data;
  (void)ps;
}

bool Curl_cf_def_data_pending(struct Curl_cfilter *cf,
                              const struct Curl_easy *data)
{
  return cf->next?
    cf->next->cft->has_data_pending(cf->next, data) : FALSE;
}
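
/* The send/recv contract used throughout this file: on success, the
 * number of bytes is returned and *err is CURLE_OK; on failure, -1 is
 * returned and *err carries the error (compare the DEBUGASSERTs in
 * Curl_cf_recv() and Curl_cf_send() below). */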

ssize_t Curl_cf_def_send(struct Curl_cfilter *cf, struct Curl_easy *data,
                         const void *buf, size_t len, CURLcode *err)
{
  if(cf->next)
    return cf->next->cft->do_send(cf->next, data, buf, len, err);
  /* no sub-filter to pass the call to: report via *err and return -1,
     matching the do_send contract used everywhere else */
  *err = CURLE_SEND_ERROR;
  return -1;
}

ssize_t Curl_cf_def_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
                         char *buf, size_t len, CURLcode *err)
{
  if(cf->next)
    return cf->next->cft->do_recv(cf->next, data, buf, len, err);
  *err = CURLE_RECV_ERROR;
  return -1;
}

bool Curl_cf_def_conn_is_alive(struct Curl_cfilter *cf,
                               struct Curl_easy *data,
                               bool *input_pending)
{
  return cf->next?
    cf->next->cft->is_alive(cf->next, data, input_pending) :
    FALSE; /* pessimistic in absence of data */
}

CURLcode Curl_cf_def_conn_keep_alive(struct Curl_cfilter *cf,
                                     struct Curl_easy *data)
{
  return cf->next?
    cf->next->cft->keep_alive(cf->next, data) :
    CURLE_OK;
}

CURLcode Curl_cf_def_query(struct Curl_cfilter *cf,
                           struct Curl_easy *data,
                           int query, int *pres1, void *pres2)
{
  return cf->next?
    cf->next->cft->query(cf->next, data, query, pres1, pres2) :
    CURLE_UNKNOWN_OPTION;
}

void Curl_conn_cf_discard_chain(struct Curl_cfilter **pcf,
                                struct Curl_easy *data)
{
  struct Curl_cfilter *cfn, *cf = *pcf;

  if(cf) {
    *pcf = NULL;
    while(cf) {
      cfn = cf->next;
      /* unlink the filter from its sub-chain before destroying it, since
       * we hold the reference now and call destroy on each one ourselves */
      cf->next = NULL;
      cf->cft->destroy(cf, data);
      free(cf);
      cf = cfn;
    }
  }
}

void Curl_conn_cf_discard_all(struct Curl_easy *data,
                              struct connectdata *conn, int index)
{
  Curl_conn_cf_discard_chain(&conn->cfilter[index], data);
}

void Curl_conn_close(struct Curl_easy *data, int index)
{
  struct Curl_cfilter *cf;

  DEBUGASSERT(data->conn);
  /* it is valid to call this even when no filters are installed */
  cf = data->conn->cfilter[index];
  if(cf) {
    cf->cft->do_close(cf, data);
  }
}

ssize_t Curl_cf_recv(struct Curl_easy *data, int num, char *buf,
                     size_t len, CURLcode *code)
{
  struct Curl_cfilter *cf;

  DEBUGASSERT(data);
  DEBUGASSERT(data->conn);
  *code = CURLE_OK;
  cf = data->conn->cfilter[num];
  while(cf && !cf->connected) {
    cf = cf->next;
  }
  if(cf) {
    ssize_t nread = cf->cft->do_recv(cf, data, buf, len, code);
    DEBUGASSERT(nread >= 0 || *code);
    DEBUGASSERT(nread < 0 || !*code);
    return nread;
  }
  failf(data, "recv: no filter connected");
  *code = CURLE_FAILED_INIT;
  return -1;
}

ssize_t Curl_cf_send(struct Curl_easy *data, int num,
                     const void *mem, size_t len, CURLcode *code)
{
  struct Curl_cfilter *cf;

  DEBUGASSERT(data);
  DEBUGASSERT(data->conn);
  *code = CURLE_OK;
  cf = data->conn->cfilter[num];
  while(cf && !cf->connected) {
    cf = cf->next;
  }
  if(cf) {
    ssize_t nwritten = cf->cft->do_send(cf, data, mem, len, code);
    DEBUGASSERT(nwritten >= 0 || *code);
    DEBUGASSERT(nwritten < 0 || !*code || !len);
    return nwritten;
  }
  failf(data, "send: no filter connected");
  DEBUGASSERT(0);
  *code = CURLE_FAILED_INIT;
  return -1;
}

CURLcode Curl_cf_create(struct Curl_cfilter **pcf,
                        const struct Curl_cftype *cft,
                        void *ctx)
{
  struct Curl_cfilter *cf;
  CURLcode result = CURLE_OUT_OF_MEMORY;

  DEBUGASSERT(cft);
  cf = calloc(1, sizeof(*cf));
  if(!cf)
    goto out;

  cf->cft = cft;
  cf->ctx = ctx;
  result = CURLE_OK;
out:
  *pcf = cf;
  return result;
}
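
/* A filter implementation provides a `struct Curl_cftype` with its
 * methods and creates instances via Curl_cf_create(). A rough sketch of
 * the pattern (hypothetical filter; the exact field names and order of
 * `struct Curl_cftype` are defined in cfilters.h):
 *
 *   static const struct Curl_cftype cft_example = {
 *     "EXAMPLE",       // name
 *     0,               // flags, e.g. CF_TYPE_IP_CONNECT or CF_TYPE_SSL
 *     ...              // method pointers; the Curl_cf_def_*() defaults
 *                      // above fit any pure pass-through slot
 *   };
 *
 *   struct Curl_cfilter *cf = NULL;
 *   CURLcode result = Curl_cf_create(&cf, &cft_example, example_ctx);
 *   if(!result)
 *     Curl_conn_cf_add(data, conn, sockindex, cf);
 */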

void Curl_conn_cf_add(struct Curl_easy *data,
                      struct connectdata *conn,
                      int index,
                      struct Curl_cfilter *cf)
{
  (void)data;
  DEBUGASSERT(conn);
  DEBUGASSERT(!cf->conn);
  DEBUGASSERT(!cf->next);

  cf->next = conn->cfilter[index];
  cf->conn = conn;
  cf->sockindex = index;
  conn->cfilter[index] = cf;
  CURL_TRC_CF(data, cf, "added");
}
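
/* Insert a new filter (or a pre-linked chain of filters) directly after
 * `cf_at`: each inserted filter adopts cf_at's connection and socket
 * index, and cf_at's previous successor is re-attached after the last
 * inserted filter. */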
void Curl_conn_cf_insert_after(struct Curl_cfilter *cf_at,
                               struct Curl_cfilter *cf_new)
{
  struct Curl_cfilter *tail, **pnext;

  DEBUGASSERT(cf_at);
  DEBUGASSERT(cf_new);
  DEBUGASSERT(!cf_new->conn);

  tail = cf_at->next;
  cf_at->next = cf_new;
  do {
    cf_new->conn = cf_at->conn;
    cf_new->sockindex = cf_at->sockindex;
    pnext = &cf_new->next;
    cf_new = cf_new->next;
  } while(cf_new);
  *pnext = tail;
}
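
/* Remove `discard` from the sub-chain below `cf` and destroy it. With
 * `destroy_always`, `discard` is destroyed even when it is not found in
 * the sub-chain. Returns TRUE when it was found and unlinked. */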
bool Curl_conn_cf_discard_sub(struct Curl_cfilter *cf,
                              struct Curl_cfilter *discard,
                              struct Curl_easy *data,
                              bool destroy_always)
{
  struct Curl_cfilter **pprev = &cf->next;
  bool found = FALSE;

  /* remove from sub-chain and destroy */
  DEBUGASSERT(cf);
  while(*pprev) {
    if(*pprev == discard) {
      *pprev = discard->next;
      discard->next = NULL;
      found = TRUE;
      break;
    }
    pprev = &((*pprev)->next);
  }
  if(found || destroy_always) {
    discard->next = NULL;
    discard->cft->destroy(discard, data);
    free(discard);
  }
  return found;
}

CURLcode Curl_conn_cf_connect(struct Curl_cfilter *cf,
                              struct Curl_easy *data,
                              bool blocking, bool *done)
{
  if(cf)
    return cf->cft->do_connect(cf, data, blocking, done);
  return CURLE_FAILED_INIT;
}

void Curl_conn_cf_close(struct Curl_cfilter *cf, struct Curl_easy *data)
{
  if(cf)
    cf->cft->do_close(cf, data);
}

ssize_t Curl_conn_cf_send(struct Curl_cfilter *cf, struct Curl_easy *data,
                          const void *buf, size_t len, CURLcode *err)
{
  if(cf)
    return cf->cft->do_send(cf, data, buf, len, err);
  *err = CURLE_SEND_ERROR;
  return -1;
}

ssize_t Curl_conn_cf_recv(struct Curl_cfilter *cf, struct Curl_easy *data,
                          char *buf, size_t len, CURLcode *err)
{
  if(cf)
    return cf->cft->do_recv(cf, data, buf, len, err);
  *err = CURLE_RECV_ERROR;
  return -1;
}

CURLcode Curl_conn_connect(struct Curl_easy *data,
                           int sockindex,
                           bool blocking,
                           bool *done)
{
  struct Curl_cfilter *cf;
  CURLcode result = CURLE_OK;

  DEBUGASSERT(data);
  DEBUGASSERT(data->conn);

  cf = data->conn->cfilter[sockindex];
  DEBUGASSERT(cf);
  if(!cf) {
    *done = FALSE;
    return CURLE_FAILED_INIT;
  }

  *done = cf->connected;
  if(!*done) {
    result = cf->cft->do_connect(cf, data, blocking, done);
    if(!result && *done) {
      Curl_conn_ev_update_info(data, data->conn);
      conn_report_connect_stats(data, data->conn);
      data->conn->keepalive = Curl_now();
    }
    else if(result) {
      conn_report_connect_stats(data, data->conn);
    }
  }

  return result;
}

bool Curl_conn_is_connected(struct connectdata *conn, int sockindex)
{
  struct Curl_cfilter *cf;

  cf = conn->cfilter[sockindex];
  return cf && cf->connected;
}
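
/* TRUE once the chain is connected at the IP level, i.e. the first
 * filter of CF_TYPE_IP_CONNECT (or one stacked above it) reports itself
 * connected; filters above it, e.g. TLS, may still be handshaking. */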
bool Curl_conn_is_ip_connected(struct Curl_easy *data, int sockindex)
{
  struct Curl_cfilter *cf;

  cf = data->conn->cfilter[sockindex];
  while(cf) {
    if(cf->connected)
      return TRUE;
    if(cf->cft->flags & CF_TYPE_IP_CONNECT)
      return FALSE;
    cf = cf->next;
  }
  return FALSE;
}

bool Curl_conn_cf_is_ssl(struct Curl_cfilter *cf)
{
  for(; cf; cf = cf->next) {
    if(cf->cft->flags & CF_TYPE_SSL)
      return TRUE;
    if(cf->cft->flags & CF_TYPE_IP_CONNECT)
      return FALSE;
  }
  return FALSE;
}

bool Curl_conn_is_ssl(struct connectdata *conn, int sockindex)
{
  return conn? Curl_conn_cf_is_ssl(conn->cfilter[sockindex]) : FALSE;
}

bool Curl_conn_is_multiplex(struct connectdata *conn, int sockindex)
{
  struct Curl_cfilter *cf = conn? conn->cfilter[sockindex] : NULL;

  for(; cf; cf = cf->next) {
    if(cf->cft->flags & CF_TYPE_MULTIPLEX)
      return TRUE;
    if(cf->cft->flags & CF_TYPE_IP_CONNECT
       || cf->cft->flags & CF_TYPE_SSL)
      return FALSE;
  }
  return FALSE;
}

bool Curl_conn_data_pending(struct Curl_easy *data, int sockindex)
{
  struct Curl_cfilter *cf;

  (void)data;
  DEBUGASSERT(data);
  DEBUGASSERT(data->conn);

  cf = data->conn->cfilter[sockindex];
  while(cf && !cf->connected) {
    cf = cf->next;
  }
  if(cf) {
    return cf->cft->has_data_pending(cf, data);
  }
  return FALSE;
}
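
/* Walk the chain and let every filter, starting at the first one that is
 * not yet connected, adjust the pollset. Example from the change that
 * introduced this: the transfer wants to send and adds POLLOUT; an
 * HTTP/2 filter blocked on flow control removes POLLOUT and adds POLLIN
 * (waiting on a WINDOW_UPDATE from the server); a connected TLS filter
 * changes nothing; an h2-proxy filter with a blocked tunnel stream also
 * swaps POLLOUT for POLLIN; a connected socket filter changes nothing.
 * The result is then merged with the pollsets of all other transfers. */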
void Curl_conn_cf_adjust_pollset(struct Curl_cfilter *cf,
                                 struct Curl_easy *data,
                                 struct easy_pollset *ps)
{
  /* Get the lowest not-connected filter, if there are any */
  while(cf && !cf->connected && cf->next && !cf->next->connected)
    cf = cf->next;
  /* From there on, give all filters a chance to adjust the pollset.
   * Lower filters are called later, so they may override */
  while(cf) {
    cf->cft->adjust_pollset(cf, data, ps);
    cf = cf->next;
  }
}
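
/* What a filter's adjust_pollset typically looks like. A hedged sketch
 * (the pollset helper and condition here are assumptions; see select.h
 * for the actual pollset editing API):
 *
 *   static void my_cf_adjust_pollset(struct Curl_cfilter *cf,
 *                                    struct Curl_easy *data,
 *                                    struct easy_pollset *ps)
 *   {
 *     curl_socket_t sock = Curl_conn_cf_get_socket(cf, data);
 *     if(my_flow_blocked(cf))   // hypothetical condition
 *       // stop waiting for writability, wait for the peer instead
 *       Curl_pollset_change(data, ps, sock, CURL_POLL_IN, CURL_POLL_OUT);
 *   }
 */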

void Curl_conn_adjust_pollset(struct Curl_easy *data,
                              struct easy_pollset *ps)
{
  int i;

  DEBUGASSERT(data);
  DEBUGASSERT(data->conn);
  for(i = 0; i < 2; ++i) {
    Curl_conn_cf_adjust_pollset(data->conn->cfilter[i], data, ps);
  }
}

void Curl_conn_get_host(struct Curl_easy *data, int sockindex,
                        const char **phost, const char **pdisplay_host,
                        int *pport)
{
  struct Curl_cfilter *cf;

  DEBUGASSERT(data->conn);
  cf = data->conn->cfilter[sockindex];
  if(cf) {
    cf->cft->get_host(cf, data, phost, pdisplay_host, pport);
  }
  else {
    /* Some filters ask for this during shutdown, mainly for debugging
     * purposes. We hand out the defaults, although this is not always
     * accurate, as the connection might be tunneled, etc. But all that
     * state is already gone here. */
    *phost = data->conn->host.name;
    *pdisplay_host = data->conn->host.dispname;
    *pport = data->conn->remote_port;
  }
}

CURLcode Curl_cf_def_cntrl(struct Curl_cfilter *cf,
                           struct Curl_easy *data,
                           int event, int arg1, void *arg2)
{
  (void)cf;
  (void)data;
  (void)event;
  (void)arg1;
  (void)arg2;
  return CURLE_OK;
}
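
/* Dispatch a control event to every filter in the chain. Filters that
 * kept the Curl_cf_def_cntrl() default are skipped as a NOP; with
 * `ignore_result`, all filters are called regardless of errors. */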
CURLcode Curl_conn_cf_cntrl(struct Curl_cfilter *cf,
                            struct Curl_easy *data,
                            bool ignore_result,
                            int event, int arg1, void *arg2)
{
  CURLcode result = CURLE_OK;

  for(; cf; cf = cf->next) {
    if(Curl_cf_def_cntrl == cf->cft->cntrl)
      continue;
    result = cf->cft->cntrl(cf, data, event, arg1, arg2);
    if(!ignore_result && result)
      break;
  }
  return result;
}

curl_socket_t Curl_conn_cf_get_socket(struct Curl_cfilter *cf,
                                      struct Curl_easy *data)
{
  curl_socket_t sock;
  if(cf && !cf->cft->query(cf, data, CF_QUERY_SOCKET, NULL, &sock))
    return sock;
  return CURL_SOCKET_BAD;
}

curl_socket_t Curl_conn_get_socket(struct Curl_easy *data, int sockindex)
{
  struct Curl_cfilter *cf;

  cf = data->conn? data->conn->cfilter[sockindex] : NULL;
  /* if the top filter has not connected, ask it (and its sub-filters)
   * for the socket. Otherwise conn->sock[sockindex] should have it.
   */
  if(cf && !cf->connected)
    return Curl_conn_cf_get_socket(cf, data);
  return data->conn? data->conn->sock[sockindex] : CURL_SOCKET_BAD;
}

void Curl_conn_forget_socket(struct Curl_easy *data, int sockindex)
{
  if(data->conn) {
    struct Curl_cfilter *cf = data->conn->cfilter[sockindex];
    if(cf)
      (void)Curl_conn_cf_cntrl(cf, data, TRUE,
                               CF_CTRL_FORGET_SOCKET, 0, NULL);
    fake_sclose(data->conn->sock[sockindex]);
    data->conn->sock[sockindex] = CURL_SOCKET_BAD;
  }
}

static CURLcode cf_cntrl_all(struct connectdata *conn,
                             struct Curl_easy *data,
                             bool ignore_result,
                             int event, int arg1, void *arg2)
{
  CURLcode result = CURLE_OK;
  size_t i;

  for(i = 0; i < ARRAYSIZE(conn->cfilter); ++i) {
    result = Curl_conn_cf_cntrl(conn->cfilter[i], data, ignore_result,
                                event, arg1, arg2);
    if(!ignore_result && result)
      break;
  }
  return result;
}
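
/* The Curl_conn_ev_*() functions below broadcast transfer lifecycle
 * events (attach/detach/setup/idle/done/pause and info updates) to all
 * filter chains of the connection via cf_cntrl_all(). */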

void Curl_conn_ev_data_attach(struct connectdata *conn,
                              struct Curl_easy *data)
{
  cf_cntrl_all(conn, data, TRUE, CF_CTRL_DATA_ATTACH, 0, NULL);
}

void Curl_conn_ev_data_detach(struct connectdata *conn,
                              struct Curl_easy *data)
{
  cf_cntrl_all(conn, data, TRUE, CF_CTRL_DATA_DETACH, 0, NULL);
}

CURLcode Curl_conn_ev_data_setup(struct Curl_easy *data)
{
  return cf_cntrl_all(data->conn, data, FALSE,
                      CF_CTRL_DATA_SETUP, 0, NULL);
}

CURLcode Curl_conn_ev_data_idle(struct Curl_easy *data)
{
  return cf_cntrl_all(data->conn, data, FALSE,
                      CF_CTRL_DATA_IDLE, 0, NULL);
}

/**
 * Notify connection filters that the transfer represented by `data`
 * is done with sending data (e.g. has uploaded everything).
 */
void Curl_conn_ev_data_done_send(struct Curl_easy *data)
{
  cf_cntrl_all(data->conn, data, TRUE, CF_CTRL_DATA_DONE_SEND, 0, NULL);
}

/**
 * Notify connection filters that the transfer represented by `data`
 * is finished - possibly prematurely, e.g. before being complete.
 */
void Curl_conn_ev_data_done(struct Curl_easy *data, bool premature)
{
  cf_cntrl_all(data->conn, data, TRUE, CF_CTRL_DATA_DONE, premature, NULL);
}

CURLcode Curl_conn_ev_data_pause(struct Curl_easy *data, bool do_pause)
{
  return cf_cntrl_all(data->conn, data, FALSE,
                      CF_CTRL_DATA_PAUSE, do_pause, NULL);
}

void Curl_conn_ev_update_info(struct Curl_easy *data,
                              struct connectdata *conn)
{
  cf_cntrl_all(conn, data, TRUE, CF_CTRL_CONN_INFO_UPDATE, 0, NULL);
}
|
|
|
|
|
2023-05-22 15:00:16 +08:00
|
|
|
/**
|
|
|
|
* Update connection statistics
|
|
|
|
*/
|
|
|
|
static void conn_report_connect_stats(struct Curl_easy *data,
|
|
|
|
struct connectdata *conn)
|
connections: introduce http/3 happy eyeballs
New cfilter HTTP-CONNECT for h3/h2/http1.1 eyeballing.
- filter is installed when `--http3` in the tool is used (or
the equivalent CURLOPT_ done in the library)
- starts a QUIC/HTTP/3 connect right away. Should that not
succeed after 100ms (subject to change), a parallel attempt
is started for HTTP/2 and HTTP/1.1 via TCP
- both attempts are subject to IPv6/IPv4 eyeballing, same
as happens for other connections
- tie timeout to the ip-version HAPPY_EYEBALLS_TIMEOUT
- use a `soft` timeout at half the value. When the soft timeout
expires, the HTTPS-CONNECT filter checks if the QUIC filter
has received any data from the server. If not, it will start
the HTTP/2 attempt.
HTTP/3(ngtcp2) improvements.
- setting call_data in all cfilter calls similar to http/2 and vtls filters
for use in callback where no stream data is available.
- returning CURLE_PARTIAL_FILE for prematurely terminated transfers
- enabling pytest test_05 for h3
- shifting functionality to "connect" UDP sockets from ngtcp2
implementation into the udp socket cfilter. Because unconnected
UDP sockets are weird. For example they error when adding to a
pollset.
HTTP/3(quiche) improvements.
- fixed upload bug in quiche implementation, now passes 251 and pytest
- error codes on stream RESET
- improved debug logs
- handling of DRAIN during connect
- limiting pending event queue
HTTP/2 cfilter improvements.
- use LOG_CF macros for dynamic logging in debug build
- fix CURLcode on RST streams to be CURLE_PARTIAL_FILE
- enable pytest test_05 for h2
- fix upload pytests and improve parallel transfer performance.
GOAWAY handling for ngtcp2/quiche
- during connect, when the remote server refuses to accept new connections
and closes immediately (so the local conn goes into DRAIN phase), the
connection is torn down and a another attempt is made after a short grace
period.
This is the behaviour observed with nghttpx when we tell it to shut
down gracefully. Tested in pytest test_03_02.
TLS improvements
- ALPN selection for SSL/SSL-PROXY filters in one vtls set of functions, replaces
copy of logic in all tls backends.
- standardized the infof logging of offered ALPNs
- ALPN negotiated: have common function for all backends that sets alpn proprty
and connection related things based on the negotiated protocol (or lack thereof).
- new tests/tests-httpd/scorecard.py for testing h3/h2 protocol implementation.
Invoke:
python3 tests/tests-httpd/scorecard.py --help
for usage.
Improvements on gathering connect statistics and socket access.
- new CF_CTRL_CONN_REPORT_STATS cfilter control for having cfilters
report connection statistics. This is triggered when the connection
has completely connected.
- new void Curl_pgrsTimeWas(..) method to report a timer update with
a timestamp of when it happend. This allows for updating timers
"later", e.g. a connect statistic after full connectivity has been
reached.
- in case of HTTP eyeballing, the previous changes will update
statistics only from the filter chain that "won" the eyeballing.
- new cfilter query CF_QUERY_SOCKET for retrieving the socket used
by a filter chain.
Added methods Curl_conn_cf_get_socket() and Curl_conn_get_socket()
for convenient use of this query.
- Change VTLS backend to query their sub-filters for the socket when
checks during the handshake are made.
HTTP/3 documentation on how https eyeballing works.
TLS improvements
- ALPN selection for SSL/SSL-PROXY filters in one vtls set of functions, replaces
copy of logic in all tls backends.
- standardized the infof logging of offered ALPNs
- ALPN negotiated: have common function for all backends that sets alpn proprty
and connection related things based on the negotiated protocol (or lack thereof).
Scorecard with Caddy.
- configure can be run with `--with-test-caddy=path` to specify which caddy to use for testing
- tests/tests-httpd/scorecard.py now measures download speeds with caddy
pytest improvements
- adding Makfile to clean gen dir
- adding nghttpx rundir creation on start
- checking httpd version 2.4.55 for test_05 cases where it is needed. Skipping with message if too old.
- catch exception when checking for caddy existance on system.
Closes #10349
2023-02-02 00:13:12 +08:00
|
|
|
{
|
2023-03-04 00:54:44 +08:00
|
|
|
struct Curl_cfilter *cf = conn->cfilter[FIRSTSOCKET];
|
|
|
|
if(cf) {
|
|
|
|
struct curltime connected;
|
|
|
|
struct curltime appconnected;
|
|
|
|
|
|
|
|
memset(&connected, 0, sizeof(connected));
|
|
|
|
cf->cft->query(cf, data, CF_QUERY_TIMER_CONNECT, NULL, &connected);
|
|
|
|
if(connected.tv_sec || connected.tv_usec)
|
|
|
|
Curl_pgrsTimeWas(data, TIMER_CONNECT, connected);
|
|
|
|
|
|
|
|
memset(&appconnected, 0, sizeof(appconnected));
|
|
|
|
cf->cft->query(cf, data, CF_QUERY_TIMER_APPCONNECT, NULL, &appconnected);
|
|
|
|
if(appconnected.tv_sec || appconnected.tv_usec)
|
|
|
|
Curl_pgrsTimeWas(data, TIMER_APPCONNECT, appconnected);
|
|
|
|
}
|
connections: introduce http/3 happy eyeballs
New cfilter HTTP-CONNECT for h3/h2/http1.1 eyeballing.
- filter is installed when `--http3` in the tool is used (or
the equivalent CURLOPT_ done in the library)
- starts a QUIC/HTTP/3 connect right away. Should that not
succeed after 100ms (subject to change), a parallel attempt
is started for HTTP/2 and HTTP/1.1 via TCP
- both attempts are subject to IPv6/IPv4 eyeballing, same
as happens for other connections
- tie timeout to the ip-version HAPPY_EYEBALLS_TIMEOUT
- use a `soft` timeout at half the value. When the soft timeout
expires, the HTTPS-CONNECT filter checks if the QUIC filter
has received any data from the server. If not, it will start
the HTTP/2 attempt.
HTTP/3(ngtcp2) improvements.
- setting call_data in all cfilter calls similar to http/2 and vtls filters
for use in callback where no stream data is available.
- returning CURLE_PARTIAL_FILE for prematurely terminated transfers
- enabling pytest test_05 for h3
- shifting functionality to "connect" UDP sockets from ngtcp2
implementation into the udp socket cfilter. Because unconnected
UDP sockets are weird. For example they error when adding to a
pollset.
HTTP/3(quiche) improvements.
- fixed upload bug in quiche implementation, now passes 251 and pytest
- error codes on stream RESET
- improved debug logs
- handling of DRAIN during connect
- limiting pending event queue
HTTP/2 cfilter improvements.
- use LOG_CF macros for dynamic logging in debug build
- fix CURLcode on RST streams to be CURLE_PARTIAL_FILE
- enable pytest test_05 for h2
- fix upload pytests and improve parallel transfer performance.
GOAWAY handling for ngtcp2/quiche
- during connect, when the remote server refuses to accept new connections
and closes immediately (so the local conn goes into DRAIN phase), the
connection is torn down and another attempt is made after a short grace
period.
This is the behaviour observed with nghttpx when we tell it to shut
down gracefully. Tested in pytest test_03_02.
TLS improvements
- ALPN selection for SSL/SSL-PROXY filters now lives in one set of vtls
functions, replacing copies of the logic in all TLS backends.
- standardized the infof logging of offered ALPNs
- ALPN negotiation: a common function for all backends sets the ALPN
property and connection-related state based on the negotiated protocol
(or lack thereof).
- new tests/tests-httpd/scorecard.py for testing h3/h2 protocol implementation.
Invoke:
python3 tests/tests-httpd/scorecard.py --help
for usage.
Improvements in gathering connect statistics and socket access.
- new CF_CTRL_CONN_REPORT_STATS cfilter control for having cfilters
report connection statistics. This is triggered when the connection
is completely established.
- new void Curl_pgrsTimeWas(..) method to report a timer update with
a timestamp of when it happened. This allows for updating timers
"later", e.g. a connect statistic after full connectivity has been
reached.
- in case of HTTP eyeballing, the previous changes will update
statistics only from the filter chain that "won" the eyeballing.
- new cfilter query CF_QUERY_SOCKET for retrieving the socket used
by a filter chain.
Added methods Curl_conn_cf_get_socket() and Curl_conn_get_socket()
for convenient use of this query.
- changed VTLS backends to query their sub-filters for the socket when
checks during the handshake are made.
Added HTTP/3 documentation on how HTTPS eyeballing works.
Scorecard with Caddy.
- configure can be run with `--with-test-caddy=path` to specify which caddy to use for testing
- tests/tests-httpd/scorecard.py now measures download speeds with caddy
pytest improvements
- adding a Makefile to clean the gen dir
- adding nghttpx rundir creation on start
- checking for httpd version 2.4.55 in test_05 cases where it is needed; skipping with a message if it is too old.
- catching exceptions when checking for caddy existence on the system.
Closes #10349
2023-02-02 00:13:12 +08:00
|
|
|
}
|
|
|
|
|
2023-03-06 19:44:45 +08:00
|
|
|
bool Curl_conn_is_alive(struct Curl_easy *data, struct connectdata *conn,
|
|
|
|
bool *input_pending)
|
2022-12-30 16:14:55 +08:00
|
|
|
{
|
|
|
|
struct Curl_cfilter *cf = conn->cfilter[FIRSTSOCKET];
|
2023-03-06 19:44:45 +08:00
|
|
|
return cf && !cf->conn->bits.close &&
|
|
|
|
cf->cft->is_alive(cf, data, input_pending);
|
2022-12-30 16:14:55 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
CURLcode Curl_conn_keep_alive(struct Curl_easy *data,
|
|
|
|
struct connectdata *conn,
|
|
|
|
int sockindex)
|
|
|
|
{
|
|
|
|
struct Curl_cfilter *cf = conn->cfilter[sockindex];
|
|
|
|
return cf? cf->cft->keep_alive(cf, data) : CURLE_OK;
|
|
|
|
}
|
|
|
|
|
|
|
|
size_t Curl_conn_get_max_concurrent(struct Curl_easy *data,
|
|
|
|
struct connectdata *conn,
|
|
|
|
int sockindex)
|
|
|
|
{
|
|
|
|
CURLcode result;
|
|
|
|
int n = 0;
|
|
|
|
|
|
|
|
struct Curl_cfilter *cf = conn->cfilter[sockindex];
|
|
|
|
result = cf? cf->cft->query(cf, data, CF_QUERY_MAX_CONCURRENT,
|
|
|
|
&n, NULL) : CURLE_UNKNOWN_OPTION;
|
|
|
|
return (result || n <= 0)? 1 : (size_t)n;
|
|
|
|
}
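As a hypothetical caller-side sketch (not library code), the
fallback-to-1 above makes capacity checks uniform across multiplexed
and non-multiplexed connections; `conn_has_capacity()` and its
parameters are invented for illustration:

static bool conn_has_capacity(struct Curl_easy *data,
                              struct connectdata *conn,
                              size_t transfers_in_use)
{
  /* multiplexed connections (e.g. HTTP/2) report their stream limit
     via CF_QUERY_MAX_CONCURRENT; everything else yields 1, so plain
     one-transfer-per-connection reuse falls out of the same check */
  return transfers_in_use <
    Curl_conn_get_max_concurrent(data, conn, FIRSTSOCKET);
}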
|
lib: introduce struct easy_poll_set for poll information
Connection filter had a `get_select_socks()` method, inspired by the
various `getsocks` functions involved during the lifetime of a
transfer. These, depending on transfer state (CONNECT/DO/DONE, etc.),
return sockets to monitor and flag if this shall be done for POLLIN
and/or POLLOUT.
Due to this design, sockets and flags could only be added, not
removed. This led to problems in filters like HTTP/2 where flow control
prohibits the sending of data until the peer increases the flow
window. The general transfer loop wants to write, adds POLLOUT, the
socket is writeable but no data can be written.
This leads to cpu busy loops. To prevent that, HTTP/2 did set the
`SEND_HOLD` flag of such a blocked transfer, so the transfer loop cedes
further attempts. This works if only one such filter is involved. If a
HTTP/2 transfer goes through a HTTP/2 proxy, two filters are
setting/clearing this flag and may step on each other's toes.
Connection filters `get_select_socks()` is replaced by
`adjust_pollset()`. They get passed a `struct easy_pollset` that keeps
up to `MAX_SOCKSPEREASYHANDLE` sockets and their `POLLIN|POLLOUT`
flags. This struct is initialized in `multi_getsock()` by calling the
various `getsocks()` implementations based on transfer state, as before.
After protocol handlers/transfer loop have set the sockets and flags
they want, the `easy_pollset` is *always* passed to the filters. Filters
"higher" in the chain are called first, starting at the first
not-yet-connected one. Each filter may add sockets and/or change
flags. When all flags are removed, the socket itself is removed from the
pollset.
Example:
* transfer wants to send, adds POLLOUT
* http/2 filter has a flow control block, removes POLLOUT and adds
POLLIN (it is waiting on a WINDOW_UPDATE from the server)
* TLS filter is connected and changes nothing
* h2-proxy filter also has a flow control block on its tunnel stream,
removes POLLOUT and adds POLLIN also.
* socket filter is connected and changes nothing
* The resulting pollset is then mixed together with all other transfers
and their pollsets, just as before.
Use of `SEND_HOLD` is no longer necessary in the filters.
All filters are adapted for the changed method. The handling in
`multi.c` has been adjusted, but its state handling and the protocol
handlers' `getsocks` methods are untouched.
The most affected filters are http/2, ngtcp2, quiche and h2-proxy. TLS
filters needed to be adjusted for the connecting handshake read/write
handling.
No noticeable difference in performance was detected in local scorecard
runs.
Closes #11833
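To make the adjust-pollset flow concrete: a minimal sketch of how a
flow-blocked HTTP/2 filter could rewrite its interest, using the
`Curl_pollset_change()` helper defined further down in this file. The
function name and the `flow_blocked` parameter are illustrative
assumptions, not the actual filter code.

static void h2_adjust_pollset_sketch(struct Curl_easy *data,
                                     struct easy_pollset *ps,
                                     curl_socket_t sock,
                                     bool flow_blocked)
{
  if(flow_blocked)
    /* blocked on a WINDOW_UPDATE from the peer: poll for reading,
       stop polling for writing; when all flags of a socket are
       removed this way, Curl_pollset_change() drops it from the set */
    Curl_pollset_change(data, ps, sock, CURL_POLL_IN, CURL_POLL_OUT);
}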
2023-09-04 18:06:07 +08:00
|
|
|
|
2024-03-27 22:01:06 +08:00
|
|
|
int Curl_conn_get_stream_error(struct Curl_easy *data,
|
|
|
|
struct connectdata *conn,
|
|
|
|
int sockindex)
|
|
|
|
{
|
|
|
|
CURLcode result;
|
|
|
|
int n = 0;
|
|
|
|
|
|
|
|
struct Curl_cfilter *cf = conn->cfilter[sockindex];
|
|
|
|
result = cf? cf->cft->query(cf, data, CF_QUERY_STREAM_ERROR,
|
|
|
|
&n, NULL) : CURLE_UNKNOWN_OPTION;
|
|
|
|
return (result || n < 0)? 0 : n;
|
|
|
|
}
|
|
|
|
|
2024-02-14 19:09:32 +08:00
|
|
|
int Curl_conn_sockindex(struct Curl_easy *data, curl_socket_t sockfd)
|
|
|
|
{
|
|
|
|
if(data && data->conn &&
|
|
|
|
sockfd != CURL_SOCKET_BAD && sockfd == data->conn->sock[SECONDARYSOCKET])
|
|
|
|
return SECONDARYSOCKET;
|
|
|
|
return FIRSTSOCKET;
|
|
|
|
}
|
|
|
|
|
|
|
|
CURLcode Curl_conn_recv(struct Curl_easy *data, int sockindex,
|
|
|
|
char *buf, size_t blen, ssize_t *n)
|
|
|
|
{
|
|
|
|
CURLcode result = CURLE_OK;
|
|
|
|
ssize_t nread;
|
|
|
|
|
|
|
|
DEBUGASSERT(data->conn);
|
|
|
|
nread = data->conn->recv[sockindex](data, sockindex, buf, blen, &result);
|
|
|
|
DEBUGASSERT(nread >= 0 || result);
|
|
|
|
DEBUGASSERT(nread < 0 || !result);
|
|
|
|
*n = (nread >= 0)? (size_t)nread : 0;
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
|
|
|
CURLcode Curl_conn_send(struct Curl_easy *data, int sockindex,
|
|
|
|
const void *buf, size_t blen,
|
lib: Curl_read/Curl_write clarifications
- replace `Curl_read()`, `Curl_write()` and `Curl_nwrite()` to
clarify when and at what level they operate
- send/recv of transfer related data is now done via
`Curl_xfer_send()/Curl_xfer_recv()` which no longer has
socket/socketindex as parameter. It decides on the transfer
setup of `conn->sockfd` and `conn->writesockfd` on which
connection filter chain to operate.
- send/recv on a specific connection filter chain is done via
`Curl_conn_send()/Curl_conn_recv()` which get the socket index
as parameter.
- rename `Curl_setup_transfer()` to `Curl_xfer_setup()` for
naming consistency
- clarify that the special CURLE_AGAIN handling of returning
`CURLE_OK` with length 0 only applies to `Curl_xfer_send()`
and CURLE_AGAIN is returned by all other send() variants.
- fix a bug in websocket `curl_ws_recv()` that mixed up data
when it arrived in more than a single chunk
Adds a method for sending not just raw bytes, but bytes that are either
"headers" or "body". The send abstraction stack, top to bottom, now is:
* `Curl_req_send()`: has parameter to indicate amount of header bytes,
buffers all data.
* `Curl_xfer_send()`: knows on which socket index to send, returns
amount of bytes sent.
* `Curl_conn_send()`: called with socket index, returns amount of bytes
sent.
In addition there is `Curl_req_flush()` for writing out all buffered
bytes.
`Curl_req_send()` is active for requests without a body, while
`Curl_buffer_send()` is still used for the others. This is because some
special quirks need to be addressed in future parts:
* `expect-100` handling
* `Curl_fillreadbuffer()` needs to add directly to the new
`data->req.sendbuf`
* special body handlings, like `chunked` encodings and line end
conversions will be moved into something like a Client Reader.
In functions of the pattern `CURLcode xxx_send(..., ssize_t *written)`,
replace the `ssize_t` with a `size_t`. It makes no sense to allow for negative
values as the returned `CURLcode` already specifies error conditions. This
allows easier handling of lengths without casting.
Closes #12964
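The `size_t` point is easiest to see as a signature sketch; these
prototypes are illustrative stand-ins, not the library's actual
declarations:

#include <stddef.h>    /* size_t */
#include <sys/types.h> /* ssize_t */

typedef int CURLcode_sketch; /* stand-in for CURLcode in this sketch */

/* before: a negative length and the return code could both signal an
   error, forcing callers to check twice and to cast */
CURLcode_sketch send_old(const void *buf, size_t blen, ssize_t *written);

/* after: errors travel only in the return code; the reported length is
   never negative, so lengths stay unsigned throughout */
CURLcode_sketch send_new(const void *buf, size_t blen, size_t *written);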
2024-02-15 23:22:53 +08:00
|
|
|
size_t *pnwritten)
|
2024-02-14 19:09:32 +08:00
|
|
|
{
|
|
|
|
ssize_t nwritten;
|
|
|
|
CURLcode result = CURLE_OK;
|
|
|
|
struct connectdata *conn;
|
|
|
|
|
|
|
|
DEBUGASSERT(sockindex >= 0 && sockindex < 2);
|
|
|
|
DEBUGASSERT(pnwritten);
|
|
|
|
DEBUGASSERT(data);
|
|
|
|
DEBUGASSERT(data->conn);
|
|
|
|
conn = data->conn;
|
2024-05-20 20:21:05 +08:00
|
|
|
#ifdef DEBUGBUILD
|
2024-02-14 19:09:32 +08:00
|
|
|
{
|
|
|
|
/* Allow debug builds to override this logic to force short sends
|
|
|
|
*/
|
|
|
|
char *p = getenv("CURL_SMALLSENDS");
|
|
|
|
if(p) {
|
|
|
|
size_t altsize = (size_t)strtoul(p, NULL, 10);
|
|
|
|
if(altsize)
|
|
|
|
blen = CURLMIN(blen, altsize);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
nwritten = conn->send[sockindex](data, sockindex, buf, blen, &result);
|
|
|
|
DEBUGASSERT((nwritten >= 0) || result);
|
2024-02-15 23:22:53 +08:00
|
|
|
*pnwritten = (nwritten < 0)? 0 : (size_t)nwritten;
|
2024-02-14 19:09:32 +08:00
|
|
|
return result;
|
|
|
|
}
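Note: per the DEBUGBUILD block above, debug builds honor a
CURL_SMALLSENDS environment variable. Setting it to a decimal byte
count caps every send at that size (it can only shrink `blen`, never
grow it), which is useful for exercising short-send code paths.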
|
2023-09-04 18:06:07 +08:00
|
|
|
|
|
|
|
void Curl_pollset_reset(struct Curl_easy *data,
|
|
|
|
struct easy_pollset *ps)
|
|
|
|
{
|
|
|
|
size_t i;
|
|
|
|
(void)data;
|
|
|
|
memset(ps, 0, sizeof(*ps));
|
|
|
|
for(i = 0; i < MAX_SOCKSPEREASYHANDLE; i++)
|
|
|
|
ps->sockets[i] = CURL_SOCKET_BAD;
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Change the poll flags for `sock` in the pollset: set the bits in
* `add_flags`, clear the bits in `remove_flags`. If no flags remain
* afterwards, the socket is removed from the pollset.
|
|
|
|
*/
|
|
|
|
void Curl_pollset_change(struct Curl_easy *data,
|
|
|
|
struct easy_pollset *ps, curl_socket_t sock,
|
|
|
|
int add_flags, int remove_flags)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
(void)data;
|
|
|
|
DEBUGASSERT(VALID_SOCK(sock));
|
|
|
|
if(!VALID_SOCK(sock))
|
|
|
|
return;
|
|
|
|
|
|
|
|
DEBUGASSERT(add_flags <= (CURL_POLL_IN|CURL_POLL_OUT));
|
|
|
|
DEBUGASSERT(remove_flags <= (CURL_POLL_IN|CURL_POLL_OUT));
|
|
|
|
DEBUGASSERT((add_flags&remove_flags) == 0); /* no overlap */
|
|
|
|
for(i = 0; i < ps->num; ++i) {
|
|
|
|
if(ps->sockets[i] == sock) {
|
|
|
|
ps->actions[i] &= (unsigned char)(~remove_flags);
|
|
|
|
ps->actions[i] |= (unsigned char)add_flags;
|
|
|
|
/* all gone? remove socket */
|
|
|
|
if(!ps->actions[i]) {
|
|
|
|
if((i + 1) < ps->num) {
|
|
|
|
memmove(&ps->sockets[i], &ps->sockets[i + 1],
|
|
|
|
(ps->num - (i + 1)) * sizeof(ps->sockets[0]));
|
|
|
|
memmove(&ps->actions[i], &ps->actions[i + 1],
|
|
|
|
(ps->num - (i + 1)) * sizeof(ps->actions[0]));
|
|
|
|
}
|
|
|
|
--ps->num;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
/* not present */
|
|
|
|
if(add_flags) {
|
|
|
|
/* Having more SOCKETS per easy handle than what is defined
|
|
|
|
* is a programming error. This indicates that we need
|
|
|
|
* to raise this limit, making easy_pollset larger.
|
|
|
|
* Since we use this in tight loops, we do not want to make
|
|
|
|
* the pollset dynamic unnecessarily.
|
|
|
|
* The current maximum in practice is HTTP/3 eyeballing where
|
|
|
|
* we have up to 4 sockets involved in connection setup.
|
|
|
|
*/
|
|
|
|
DEBUGASSERT(i < MAX_SOCKSPEREASYHANDLE);
|
|
|
|
if(i < MAX_SOCKSPEREASYHANDLE) {
|
|
|
|
ps->sockets[i] = sock;
|
|
|
|
ps->actions[i] = (unsigned char)add_flags;
|
|
|
|
ps->num = i + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Curl_pollset_set(struct Curl_easy *data,
|
|
|
|
struct easy_pollset *ps, curl_socket_t sock,
|
|
|
|
bool do_in, bool do_out)
|
|
|
|
{
|
|
|
|
Curl_pollset_change(data, ps, sock,
|
|
|
|
(do_in?CURL_POLL_IN:0)|(do_out?CURL_POLL_OUT:0),
|
|
|
|
(!do_in?CURL_POLL_IN:0)|(!do_out?CURL_POLL_OUT:0));
|
|
|
|
}
|
|
|
|
|
|
|
|
static void ps_add(struct Curl_easy *data, struct easy_pollset *ps,
|
|
|
|
int bitmap, curl_socket_t *socks)
|
|
|
|
{
|
|
|
|
if(bitmap) {
|
|
|
|
int i;
|
|
|
|
for(i = 0; i < MAX_SOCKSPEREASYHANDLE; ++i) {
|
|
|
|
if(!(bitmap & GETSOCK_MASK_RW(i)) || !VALID_SOCK((socks[i]))) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if(bitmap & GETSOCK_READSOCK(i)) {
|
|
|
|
if(bitmap & GETSOCK_WRITESOCK(i))
|
|
|
|
Curl_pollset_add_inout(data, ps, socks[i]);
|
|
|
|
else
|
|
|
|
/* is READ, since we checked MASK_RW above */
|
|
|
|
Curl_pollset_add_in(data, ps, socks[i]);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
Curl_pollset_add_out(data, ps, socks[i]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
void Curl_pollset_add_socks(struct Curl_easy *data,
|
|
|
|
struct easy_pollset *ps,
|
|
|
|
int (*get_socks_cb)(struct Curl_easy *data,
|
|
|
|
curl_socket_t *socks))
|
|
|
|
{
|
|
|
|
curl_socket_t socks[MAX_SOCKSPEREASYHANDLE];
|
|
|
|
int bitmap;
|
|
|
|
|
|
|
|
bitmap = get_socks_cb(data, socks);
|
|
|
|
ps_add(data, ps, bitmap, socks);
|
|
|
|
}
|
|
|
|
|
|
|
|
void Curl_pollset_check(struct Curl_easy *data,
|
|
|
|
struct easy_pollset *ps, curl_socket_t sock,
|
|
|
|
bool *pwant_read, bool *pwant_write)
|
|
|
|
{
|
|
|
|
unsigned int i;
|
|
|
|
|
|
|
|
(void)data;
|
|
|
|
DEBUGASSERT(VALID_SOCK(sock));
|
|
|
|
for(i = 0; i < ps->num; ++i) {
|
|
|
|
if(ps->sockets[i] == sock) {
|
|
|
|
*pwant_read = !!(ps->actions[i] & CURL_POLL_IN);
|
|
|
|
*pwant_write = !!(ps->actions[i] & CURL_POLL_OUT);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
*pwant_read = *pwant_write = FALSE;
|
|
|
|
}
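To tie the pollset helpers together, a minimal test-style sketch of
setting and querying interest for one socket; `pollset_demo()` is
illustrative and not part of the library:

static void pollset_demo(struct Curl_easy *data, curl_socket_t sock)
{
  struct easy_pollset ps;
  bool want_read, want_write;

  Curl_pollset_reset(data, &ps);                  /* all slots empty */
  Curl_pollset_set(data, &ps, sock, TRUE, FALSE); /* read interest */
  Curl_pollset_check(data, &ps, sock, &want_read, &want_write);
  /* here: want_read == TRUE, want_write == FALSE */
  Curl_pollset_set(data, &ps, sock, FALSE, FALSE); /* clears all flags,
                                      removing the socket again */
}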
|