Mirror of https://github.com/curl/curl.git, synced 2024-12-09 06:30:06 +08:00
Commit 4b86113f5e
Speed limits (from CURLOPT_MAX_RECV_SPEED_LARGE & CURLOPT_MAX_SEND_SPEED_LARGE) were applied simply by comparing the limits with the cumulative average speed of the entire transfer. While this might work at times with good/constant connections, in other cases it can result in the limits simply being "ignored" for more than "short bursts" (as the man page puts it).

Consider a download that runs much slower than the limit for some time (because bandwidth is used elsewhere, the server is slow, whatever the reason). Once things get better, curl would simply ignore the limit until the average speed (measured since the beginning of the transfer) reached the limit. This could render the limit useless for actually keeping the transfer from using the entire bandwidth (at least for quite some time).

So instead, we now use a "moving starting point" as reference, and every time at least as much as the limit has been transferred, we reset this starting point to the current position. This gives a good limiting effect that applies to the "current speed", with instant reactivity in case of a sudden speed burst.

Closes #971
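A minimal sketch, in plain C, of the "moving starting point" idea described above. This is an illustration only, not the code from this commit: the struct and the names speed_limit, speed_limit_wait() and speed_limit_advance() are invented here for clarity.

#include <curl/curl.h> /* curl_off_t */
#include <time.h>      /* time_t, difftime() */

/* Invented per-direction state for this sketch: the "moving starting
   point" against which the current speed is judged. */
struct speed_limit {
  curl_off_t limit;       /* allowed bytes per second, 0 means unlimited */
  curl_off_t start_count; /* byte counter value at the starting point */
  time_t start_time;      /* wall-clock time of the starting point */
};

/* Seconds to pause before moving more data, judged only against what
   has been transferred since the starting point, not since the
   beginning of the whole transfer. */
double speed_limit_wait(const struct speed_limit *sl,
                        curl_off_t bytecount, time_t now)
{
  curl_off_t moved = bytecount - sl->start_count;
  double elapsed, needed;

  if(!sl->limit || moved < sl->limit)
    return 0.0; /* unlimited, or less than one second's worth moved */

  elapsed = difftime(now, sl->start_time);
  needed = (double)moved / (double)sl->limit; /* seconds this amount "costs" */
  return (needed > elapsed) ? (needed - elapsed) : 0.0;
}

/* Every time at least 'limit' bytes have been moved, slide the starting
   point forward so the check keeps tracking the current speed instead
   of the whole-transfer average. */
void speed_limit_advance(struct speed_limit *sl,
                         curl_off_t bytecount, time_t now)
{
  if(sl->limit && (bytecount - sl->start_count) >= sl->limit) {
    sl->start_count = bytecount;
    sl->start_time = now;
  }
}

The point of resetting the starting point is that a slow early phase can no longer build up "credit" that would later let the transfer run unthrottled until the whole-transfer average catches up with the limit.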
69 lines
3.0 KiB
C
#ifndef HEADER_CURL_TRANSFER_H
#define HEADER_CURL_TRANSFER_H
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) 1998 - 2016, Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.haxx.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ***************************************************************************/

void Curl_init_CONNECT(struct Curl_easy *data);

CURLcode Curl_pretransfer(struct Curl_easy *data);
CURLcode Curl_second_connect(struct connectdata *conn);
CURLcode Curl_posttransfer(struct Curl_easy *data);

typedef enum {
  FOLLOW_NONE,  /* not used within the function, just a placeholder to
                   allow initing to this */
  FOLLOW_FAKE,  /* only records stuff, not actually following */
  FOLLOW_RETRY, /* set if this is a request retry as opposed to a real
                   redirect following */
  FOLLOW_REDIR, /* a full true redirect */
  FOLLOW_LAST   /* never used */
} followtype;

CURLcode Curl_follow(struct Curl_easy *data, char *newurl,
                     followtype type);
CURLcode Curl_readwrite(struct connectdata *conn,
                        struct Curl_easy *data, bool *done,
                        bool *comeback);
int Curl_single_getsock(const struct connectdata *conn,
                        curl_socket_t *socks,
                        int numsocks);
CURLcode Curl_readrewind(struct connectdata *conn);
CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp);
CURLcode Curl_retry_request(struct connectdata *conn, char **url);
bool Curl_meets_timecondition(struct Curl_easy *data, time_t timeofdoc);

/* This sets up a forthcoming transfer */
void
Curl_setup_transfer (struct connectdata *data,
                     int sockindex,     /* socket index to read from or -1 */
                     curl_off_t size,   /* -1 if unknown at this point */
                     bool getheader,    /* TRUE if header parsing is wanted */
                     curl_off_t *bytecountp, /* return number of bytes read */
                     int writesockindex, /* socket index to write to, it may
                                            very well be the same we read from.
                                            -1 disables */
                     curl_off_t *writecountp /* return number of bytes written */
                     );

#endif /* HEADER_CURL_TRANSFER_H */