lib: add bufq and dynhds

Adding `bufq`:
- at init() time configured to hold up to `n` chunks of `m` bytes each
- various methods for reading from and writing to it
- `peek` support to get access to buffered data without copy
- `pass` support to allow buffer flushing on write if it becomes full
- use case: IO buffers for dynamic reads and writes that do not blow up
- distinct from `dynbuf` in that:
  - it maintains a read position
  - writes on a full bufq return CURLE_AGAIN instead of nuking itself
- Init options:
  - SOFT_LIMIT: allow writes into a full bufq
  - NO_SPARES: free empty chunks right away
- a `bufc_pool` that can keep a number of spare chunks to be shared
  between different `bufq` instances

Adding `dynhds`:
- a straightforward list of name+value pairs as used for HTTP headers
- headers can be appended dynamically
- headers can be removed again
- headers can be replaced
- headers can be looked up
- http/1.1 formatting into a `dynbuf`
- configured at init() with limits on header counts and total string sizes
- use case: pass a HTTP request or response around without being version
  specific
- express a HTTP request without a curl easy handle (used in h2 proxy
  tunnels)
- future extension possibilities:
  - conversions of `dynhds` to nghttp2/nghttp3 name+value arrays

Closes #10720
parent 8cabef6fc3
commit 61f52a97e9
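A minimal usage sketch, for orientation before the diff: it strings together the bufq calls declared in lib/bufq.h below. The chunk sizes, the sample data and the demo function itself are illustrative assumptions, not part of the commit.

/* Illustrative sketch only, not part of the commit. Assumes curl_setup.h
 * and bufq.h are included, as in the unit tests. */
static void bufq_demo(void)
{
  struct bufq q;
  unsigned char out[128];
  const unsigned char *peeked;
  size_t plen;
  ssize_t n;
  CURLcode result;

  /* hold up to 4 chunks of 1024 bytes each */
  Curl_bufq_init(&q, 1024, 4);

  /* writing copies data in; a full bufq returns -1 with CURLE_AGAIN */
  n = Curl_bufq_write(&q, (const unsigned char *)"hello", 5, &result);
  if(n < 0 && result == CURLE_AGAIN) {
    /* no room right now, try again later */
  }

  /* peek gives access to buffered data without copying it */
  if(Curl_bufq_peek(&q, &peeked, &plen)) {
    /* ... look at peeked[0..plen-1], then consume it */
    Curl_bufq_skip(&q, plen);
  }

  /* reading copies data out; an empty bufq returns -1 with CURLE_AGAIN */
  n = Curl_bufq_read(&q, out, sizeof(out), &result);
  (void)n;

  Curl_bufq_free(&q);
}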
@@ -105,6 +105,7 @@ LIB_CFILES = \
 asyn-ares.c \
 asyn-thread.c \
 base64.c \
+bufq.c \
 bufref.c \
 c-hyper.c \
 cf-https-connect.c \
@@ -135,6 +136,7 @@ LIB_CFILES = \
 dict.c \
 doh.c \
 dynbuf.c \
+dynhds.c \
 easy.c \
 easygetopt.c \
 easyoptions.c \
@@ -230,6 +232,7 @@ LIB_HFILES = \
 amigaos.h \
 arpa_telnet.h \
 asyn.h \
+bufq.h \
 bufref.h \
 c-hyper.h \
 cf-https-connect.h \
@@ -273,6 +276,7 @@ LIB_HFILES = \
 dict.h \
 doh.h \
 dynbuf.h \
+dynhds.h \
 easy_lock.h \
 easyif.h \
 easyoptions.h \
lib/bufq.c (new file, 604 lines)
@@ -0,0 +1,604 @@
|
||||
/***************************************************************************
|
||||
* _ _ ____ _
|
||||
* Project ___| | | | _ \| |
|
||||
* / __| | | | |_) | |
|
||||
* | (__| |_| | _ <| |___
|
||||
* \___|\___/|_| \_\_____|
|
||||
*
|
||||
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
|
||||
*
|
||||
* This software is licensed as described in the file COPYING, which
|
||||
* you should have received as part of this distribution. The terms
|
||||
* are also available at https://curl.se/docs/copyright.html.
|
||||
*
|
||||
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
|
||||
* copies of the Software, and permit persons to whom the Software is
|
||||
* furnished to do so, under the terms of the COPYING file.
|
||||
*
|
||||
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
|
||||
* KIND, either express or implied.
|
||||
*
|
||||
* SPDX-License-Identifier: curl
|
||||
*
|
||||
***************************************************************************/
|
||||
|
||||
#include "curl_setup.h"
|
||||
#include "bufq.h"
|
||||
|
||||
/* The last 3 #include files should be in this order */
|
||||
#include "curl_printf.h"
|
||||
#include "curl_memory.h"
|
||||
#include "memdebug.h"
|
||||
|
||||
static bool chunk_is_empty(const struct buf_chunk *chunk)
|
||||
{
|
||||
return chunk->r_offset >= chunk->w_offset;
|
||||
}
|
||||
|
||||
static bool chunk_is_full(const struct buf_chunk *chunk)
|
||||
{
|
||||
return chunk->w_offset >= chunk->dlen;
|
||||
}
|
||||
|
||||
static size_t chunk_len(const struct buf_chunk *chunk)
|
||||
{
|
||||
return chunk->w_offset - chunk->r_offset;
|
||||
}
|
||||
|
||||
static size_t chunk_space(const struct buf_chunk *chunk)
|
||||
{
|
||||
return chunk->dlen - chunk->w_offset;
|
||||
}
|
||||
|
||||
static void chunk_reset(struct buf_chunk *chunk)
|
||||
{
|
||||
chunk->next = NULL;
|
||||
chunk->r_offset = chunk->w_offset = 0;
|
||||
}
|
||||
|
||||
static size_t chunk_append(struct buf_chunk *chunk,
|
||||
const unsigned char *buf, size_t len)
|
||||
{
|
||||
unsigned char *p = &chunk->x.data[chunk->w_offset];
|
||||
size_t n = chunk->dlen - chunk->w_offset;
|
||||
DEBUGASSERT(chunk->dlen >= chunk->w_offset);
|
||||
if(n) {
|
||||
n = CURLMIN(n, len);
|
||||
memcpy(p, buf, n);
|
||||
chunk->w_offset += n;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
static size_t chunk_read(struct buf_chunk *chunk,
|
||||
unsigned char *buf, size_t len)
|
||||
{
|
||||
unsigned char *p = &chunk->x.data[chunk->r_offset];
|
||||
size_t n = chunk->w_offset - chunk->r_offset;
|
||||
DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
|
||||
if(n) {
|
||||
n = CURLMIN(n, len);
|
||||
memcpy(buf, p, n);
|
||||
chunk->r_offset += n;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
static ssize_t chunk_slurp(struct buf_chunk *chunk,
|
||||
Curl_bufq_reader *reader,
|
||||
void *reader_ctx, CURLcode *err)
|
||||
{
|
||||
unsigned char *p = &chunk->x.data[chunk->w_offset];
|
||||
size_t n = chunk->dlen - chunk->w_offset;
|
||||
ssize_t nread;
|
||||
|
||||
DEBUGASSERT(chunk->dlen >= chunk->w_offset);
|
||||
if(!n) {
|
||||
*err = CURLE_AGAIN;
|
||||
return -1;
|
||||
}
|
||||
nread = reader(reader_ctx, p, n, err);
|
||||
if(nread > 0) {
|
||||
DEBUGASSERT((size_t)nread <= n);
|
||||
chunk->w_offset += nread;
|
||||
}
|
||||
return nread;
|
||||
}
|
||||
|
||||
static void chunk_peek(const struct buf_chunk *chunk,
|
||||
const unsigned char **pbuf, size_t *plen)
|
||||
{
|
||||
DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
|
||||
*pbuf = &chunk->x.data[chunk->r_offset];
|
||||
*plen = chunk->w_offset - chunk->r_offset;
|
||||
}
|
||||
|
||||
static void chunk_peek_at(const struct buf_chunk *chunk, size_t offset,
|
||||
const unsigned char **pbuf, size_t *plen)
|
||||
{
|
||||
offset += chunk->r_offset;
|
||||
DEBUGASSERT(chunk->w_offset >= offset);
|
||||
*pbuf = &chunk->x.data[offset];
|
||||
*plen = chunk->w_offset - offset;
|
||||
}
|
||||
|
||||
static size_t chunk_skip(struct buf_chunk *chunk, size_t amount)
|
||||
{
|
||||
size_t n = chunk->w_offset - chunk->r_offset;
|
||||
DEBUGASSERT(chunk->w_offset >= chunk->r_offset);
|
||||
if(n) {
|
||||
n = CURLMIN(n, amount);
|
||||
chunk->r_offset += n;
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
static void chunk_list_free(struct buf_chunk **anchor)
|
||||
{
|
||||
struct buf_chunk *chunk;
|
||||
while(*anchor) {
|
||||
chunk = *anchor;
|
||||
*anchor = chunk->next;
|
||||
free(chunk);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
void Curl_bufcp_init(struct bufc_pool *pool,
|
||||
size_t chunk_size, size_t spare_max)
|
||||
{
|
||||
DEBUGASSERT(chunk_size > 0);
|
||||
DEBUGASSERT(spare_max > 0);
|
||||
memset(pool, 0, sizeof(*pool));
|
||||
pool->chunk_size = chunk_size;
|
||||
pool->spare_max = spare_max;
|
||||
}
|
||||
|
||||
CURLcode Curl_bufcp_take(struct bufc_pool *pool,
|
||||
struct buf_chunk **pchunk)
|
||||
{
|
||||
struct buf_chunk *chunk = NULL;
|
||||
|
||||
if(pool->spare) {
|
||||
chunk = pool->spare;
|
||||
pool->spare = chunk->next;
|
||||
--pool->spare_count;
|
||||
chunk_reset(chunk);
|
||||
*pchunk = chunk;
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
chunk = calloc(1, sizeof(*chunk) + pool->chunk_size);
|
||||
if(!chunk) {
|
||||
*pchunk = NULL;
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
}
|
||||
chunk->dlen = pool->chunk_size;
|
||||
*pchunk = chunk;
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
void Curl_bufcp_put(struct bufc_pool *pool,
|
||||
struct buf_chunk *chunk)
|
||||
{
|
||||
if(pool->spare_count >= pool->spare_max) {
|
||||
free(chunk);
|
||||
}
|
||||
else {
|
||||
chunk_reset(chunk);
|
||||
chunk->next = pool->spare;
|
||||
pool->spare = chunk;
|
||||
++pool->spare_count;
|
||||
}
|
||||
}
|
||||
|
||||
void Curl_bufcp_free(struct bufc_pool *pool)
|
||||
{
|
||||
chunk_list_free(&pool->spare);
|
||||
pool->spare_count = 0;
|
||||
}
|
||||
|
||||
static void bufq_init(struct bufq *q, struct bufc_pool *pool,
|
||||
size_t chunk_size, size_t max_chunks, int opts)
|
||||
{
|
||||
DEBUGASSERT(chunk_size > 0);
|
||||
DEBUGASSERT(max_chunks > 0);
|
||||
memset(q, 0, sizeof(*q));
|
||||
q->chunk_size = chunk_size;
|
||||
q->max_chunks = max_chunks;
|
||||
q->pool = pool;
|
||||
q->opts = opts;
|
||||
}
|
||||
|
||||
void Curl_bufq_init2(struct bufq *q, size_t chunk_size, size_t max_chunks,
|
||||
int opts)
|
||||
{
|
||||
bufq_init(q, NULL, chunk_size, max_chunks, opts);
|
||||
}
|
||||
|
||||
void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks)
|
||||
{
|
||||
bufq_init(q, NULL, chunk_size, max_chunks, BUFQ_OPT_NONE);
|
||||
}
|
||||
|
||||
void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
|
||||
size_t max_chunks, int opts)
|
||||
{
|
||||
bufq_init(q, pool, pool->chunk_size, max_chunks, opts);
|
||||
}
|
||||
|
||||
void Curl_bufq_free(struct bufq *q)
|
||||
{
|
||||
chunk_list_free(&q->head);
|
||||
chunk_list_free(&q->spare);
|
||||
q->tail = NULL;
|
||||
q->chunk_count = 0;
|
||||
}
|
||||
|
||||
void Curl_bufq_reset(struct bufq *q)
|
||||
{
|
||||
struct buf_chunk *chunk;
|
||||
while(q->head) {
|
||||
chunk = q->head;
|
||||
q->head = chunk->next;
|
||||
chunk->next = q->spare;
|
||||
q->spare = chunk;
|
||||
}
|
||||
q->tail = NULL;
|
||||
}
|
||||
|
||||
size_t Curl_bufq_len(const struct bufq *q)
|
||||
{
|
||||
const struct buf_chunk *chunk = q->head;
|
||||
size_t len = 0;
|
||||
while(chunk) {
|
||||
len += chunk_len(chunk);
|
||||
chunk = chunk->next;
|
||||
}
|
||||
return len;
|
||||
}
|
||||
|
||||
size_t Curl_bufq_space(const struct bufq *q)
|
||||
{
|
||||
size_t space = 0;
|
||||
if(q->tail)
|
||||
space += chunk_space(q->tail);
|
||||
if(q->chunk_count < q->max_chunks) {
|
||||
space += (q->max_chunks - q->chunk_count) * q->chunk_size;
|
||||
}
|
||||
return space;
|
||||
}
|
||||
|
||||
bool Curl_bufq_is_empty(const struct bufq *q)
|
||||
{
|
||||
return !q->head || chunk_is_empty(q->head);
|
||||
}
|
||||
|
||||
bool Curl_bufq_is_full(const struct bufq *q)
|
||||
{
|
||||
if(!q->tail || q->spare)
|
||||
return FALSE;
|
||||
if(q->chunk_count < q->max_chunks)
|
||||
return FALSE;
|
||||
if(q->chunk_count > q->max_chunks)
|
||||
return TRUE;
|
||||
/* we have no spares and cannot make more, is the tail full? */
|
||||
return chunk_is_full(q->tail);
|
||||
}
|
||||
|
||||
static size_t data_pass_size(struct bufq *q)
|
||||
{
|
||||
(void)q;
|
||||
return 4*1024;
|
||||
}
|
||||
|
||||
static struct buf_chunk *get_spare(struct bufq *q)
|
||||
{
|
||||
struct buf_chunk *chunk = NULL;
|
||||
|
||||
if(q->spare) {
|
||||
chunk = q->spare;
|
||||
q->spare = chunk->next;
|
||||
chunk_reset(chunk);
|
||||
return chunk;
|
||||
}
|
||||
|
||||
if(q->chunk_count >= q->max_chunks && (!(q->opts & BUFQ_OPT_SOFT_LIMIT)))
|
||||
return NULL;
|
||||
|
||||
if(q->pool) {
|
||||
if(Curl_bufcp_take(q->pool, &chunk))
|
||||
return NULL;
|
||||
++q->chunk_count;
|
||||
return chunk;
|
||||
}
|
||||
else {
|
||||
chunk = calloc(1, sizeof(*chunk) + q->chunk_size);
|
||||
if(!chunk)
|
||||
return NULL;
|
||||
chunk->dlen = q->chunk_size;
|
||||
++q->chunk_count;
|
||||
return chunk;
|
||||
}
|
||||
}
|
||||
|
||||
static void prune_head(struct bufq *q)
|
||||
{
|
||||
struct buf_chunk *chunk;
|
||||
|
||||
while(q->head && chunk_is_empty(q->head)) {
|
||||
chunk = q->head;
|
||||
q->head = chunk->next;
|
||||
if(q->tail == chunk)
|
||||
q->tail = q->head;
|
||||
if(q->pool) {
|
||||
Curl_bufcp_put(q->pool, chunk);
|
||||
--q->chunk_count;
|
||||
}
|
||||
else if((q->chunk_count > q->max_chunks) ||
|
||||
(q->opts & BUFQ_OPT_NO_SPARES)) {
|
||||
/* SOFT_LIMIT allowed us more than max. free spares until
|
||||
* we are at max again. Or free them if we are configured
|
||||
* to not use spares. */
|
||||
free(chunk);
|
||||
--q->chunk_count;
|
||||
}
|
||||
else {
|
||||
chunk->next = q->spare;
|
||||
q->spare = chunk;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static struct buf_chunk *get_non_full_tail(struct bufq *q)
|
||||
{
|
||||
struct buf_chunk *chunk;
|
||||
|
||||
if(q->tail && !chunk_is_full(q->tail))
|
||||
return q->tail;
|
||||
chunk = get_spare(q);
|
||||
if(chunk) {
|
||||
/* new tail, and possibly new head */
|
||||
if(q->tail) {
|
||||
q->tail->next = chunk;
|
||||
q->tail = chunk;
|
||||
}
|
||||
else {
|
||||
DEBUGASSERT(!q->head);
|
||||
q->head = q->tail = chunk;
|
||||
}
|
||||
}
|
||||
return chunk;
|
||||
}
|
||||
|
||||
ssize_t Curl_bufq_write(struct bufq *q,
|
||||
const unsigned char *buf, size_t len,
|
||||
CURLcode *err)
|
||||
{
|
||||
struct buf_chunk *tail;
|
||||
ssize_t nwritten = 0;
|
||||
size_t n;
|
||||
|
||||
while(len) {
|
||||
tail = get_non_full_tail(q);
|
||||
if(!tail) {
|
||||
if(q->chunk_count < q->max_chunks) {
|
||||
*err = CURLE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
break;
|
||||
}
|
||||
n = chunk_append(tail, buf, len);
|
||||
DEBUGASSERT(n);
|
||||
nwritten += n;
|
||||
buf += n;
|
||||
len -= n;
|
||||
}
|
||||
if(nwritten == 0 && len) {
|
||||
*err = CURLE_AGAIN;
|
||||
return -1;
|
||||
}
|
||||
*err = CURLE_OK;
|
||||
return nwritten;
|
||||
}
|
||||
|
||||
ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
|
||||
CURLcode *err)
|
||||
{
|
||||
ssize_t nread = 0;
|
||||
size_t n;
|
||||
|
||||
*err = CURLE_OK;
|
||||
while(len && q->head) {
|
||||
n = chunk_read(q->head, buf, len);
|
||||
if(n) {
|
||||
nread += n;
|
||||
buf += n;
|
||||
len -= n;
|
||||
}
|
||||
prune_head(q);
|
||||
}
|
||||
if(nread == 0) {
|
||||
*err = CURLE_AGAIN;
|
||||
return -1;
|
||||
}
|
||||
return nread;
|
||||
}
|
||||
|
||||
bool Curl_bufq_peek(const struct bufq *q,
|
||||
const unsigned char **pbuf, size_t *plen)
|
||||
{
|
||||
if(q->head && !chunk_is_empty(q->head)) {
|
||||
chunk_peek(q->head, pbuf, plen);
|
||||
return TRUE;
|
||||
}
|
||||
*pbuf = NULL;
|
||||
*plen = 0;
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
bool Curl_bufq_peek_at(const struct bufq *q, size_t offset,
|
||||
const unsigned char **pbuf, size_t *plen)
|
||||
{
|
||||
struct buf_chunk *c = q->head;
|
||||
size_t clen;
|
||||
|
||||
while(c) {
|
||||
clen = chunk_len(c);
|
||||
if(!clen)
|
||||
break;
|
||||
if(offset >= clen) {
|
||||
offset -= clen;
|
||||
c = c->next;
|
||||
continue;
|
||||
}
|
||||
chunk_peek_at(c, offset, pbuf, plen);
|
||||
return TRUE;
|
||||
}
|
||||
*pbuf = NULL;
|
||||
*plen = 0;
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
void Curl_bufq_skip(struct bufq *q, size_t amount)
|
||||
{
|
||||
size_t n;
|
||||
|
||||
while(amount && q->head) {
|
||||
n = chunk_skip(q->head, amount);
|
||||
amount -= n;
|
||||
prune_head(q);
|
||||
}
|
||||
}
|
||||
|
||||
ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
|
||||
void *writer_ctx, CURLcode *err)
|
||||
{
|
||||
const unsigned char *buf;
|
||||
size_t blen;
|
||||
ssize_t nwritten = 0;
|
||||
|
||||
while(Curl_bufq_peek(q, &buf, &blen)) {
|
||||
ssize_t chunk_written;
|
||||
|
||||
chunk_written = writer(writer_ctx, buf, blen, err);
|
||||
if(chunk_written < 0) {
|
||||
if(!nwritten || *err != CURLE_AGAIN) {
|
||||
/* blocked on first write or real error, fail */
|
||||
nwritten = -1;
|
||||
}
|
||||
break;
|
||||
}
|
||||
Curl_bufq_skip(q, (size_t)chunk_written);
|
||||
nwritten += chunk_written;
|
||||
}
|
||||
return nwritten;
|
||||
}
|
||||
|
||||
ssize_t Curl_bufq_write_pass(struct bufq *q,
|
||||
const unsigned char *buf, size_t len,
|
||||
Curl_bufq_writer *writer, void *writer_ctx,
|
||||
CURLcode *err)
|
||||
{
|
||||
ssize_t nwritten = 0, n;
|
||||
bool prefer_direct = (len >= data_pass_size(q));
|
||||
|
||||
*err = CURLE_OK;
|
||||
while(len) {
|
||||
if(Curl_bufq_is_full(q) || (!Curl_bufq_is_empty(q) && prefer_direct)) {
|
||||
/* try to make room in case we are full
|
||||
* or empty the buffer when adding "large" data */
|
||||
n = Curl_bufq_pass(q, writer, writer_ctx, err);
|
||||
if(n < 0) {
|
||||
if(*err != CURLE_AGAIN) {
|
||||
/* real error, fail */
|
||||
return -1;
|
||||
}
|
||||
/* would block */
|
||||
}
|
||||
}
|
||||
|
||||
if(Curl_bufq_is_empty(q) && prefer_direct) {
|
||||
/* empty and `data` is "large", try passing directly */
|
||||
n = writer(writer_ctx, buf, len, err);
|
||||
if(n < 0) {
|
||||
if(*err != CURLE_AGAIN) {
|
||||
/* real error, fail */
|
||||
return -1;
|
||||
}
|
||||
/* passing would block */
|
||||
n = 0;
|
||||
}
|
||||
buf += (size_t)n;
|
||||
len -= (size_t)n;
|
||||
nwritten += (size_t)n;
|
||||
}
|
||||
|
||||
if(len) {
|
||||
/* Add whatever is remaining now to bufq */
|
||||
n = Curl_bufq_write(q, buf, len, err);
|
||||
if(n < 0) {
|
||||
if(*err != CURLE_AGAIN) {
|
||||
/* real error, fail */
|
||||
return -1;
|
||||
}
|
||||
/* no room in bufq, bail out */
|
||||
goto out;
|
||||
}
|
||||
/* Maybe only part of `data` has been added, continue to loop */
|
||||
buf += (size_t)n;
|
||||
len -= (size_t)n;
|
||||
nwritten += (size_t)n;
|
||||
}
|
||||
}
|
||||
|
||||
out:
|
||||
return nwritten;
|
||||
}
|
||||
|
||||
ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
|
||||
void *reader_ctx, CURLcode *err)
|
||||
{
|
||||
struct buf_chunk *tail = NULL;
|
||||
ssize_t nread = 0, chunk_nread;
|
||||
|
||||
*err = CURLE_AGAIN;
|
||||
while(1) {
|
||||
tail = get_non_full_tail(q);
|
||||
if(!tail) {
|
||||
if(q->chunk_count < q->max_chunks) {
|
||||
*err = CURLE_OUT_OF_MEMORY;
|
||||
return -1;
|
||||
}
|
||||
else if(nread) {
|
||||
/* full, return what we read */
|
||||
return nread;
|
||||
}
|
||||
else {
|
||||
/* full, blocked */
|
||||
*err = CURLE_AGAIN;
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
chunk_nread = chunk_slurp(tail, reader, reader_ctx, err);
|
||||
if(chunk_nread < 0) {
|
||||
if(!nread || *err != CURLE_AGAIN) {
|
||||
/* blocked on first read or real error, fail */
|
||||
nread = -1;
|
||||
}
|
||||
break;
|
||||
}
|
||||
else if(chunk_nread == 0) {
|
||||
/* eof */
|
||||
*err = CURLE_OK;
|
||||
break;
|
||||
}
|
||||
nread += chunk_nread;
|
||||
/* give up slurping when we get less bytes than we asked for */
|
||||
if(!chunk_is_full(tail))
|
||||
break;
|
||||
}
|
||||
return nread;
|
||||
}
|
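The slurp logic above is driven by a Curl_bufq_reader callback. A hedged sketch of such a reader, pulling from a plain memory buffer, follows; the mem_reader struct and both function names are hypothetical, and curl_setup.h plus bufq.h are assumed to be included.

/* Illustrative sketch only, not part of the commit. */
struct mem_reader {
  const unsigned char *buf;
  size_t len;                 /* bytes still available */
};

static ssize_t mem_read(void *reader_ctx, unsigned char *buf, size_t len,
                        CURLcode *err)
{
  struct mem_reader *mr = reader_ctx;
  size_t n = CURLMIN(len, mr->len);

  *err = CURLE_OK;
  if(!n)
    return 0;                 /* 0 is treated as EOF by Curl_bufq_slurp() */
  memcpy(buf, mr->buf, n);
  mr->buf += n;
  mr->len -= n;
  return (ssize_t)n;
}

/* append as much as fits (or until EOF/blocking) into `q` */
static ssize_t fill_from_memory(struct bufq *q, struct mem_reader *mr,
                                CURLcode *err)
{
  return Curl_bufq_slurp(q, mem_read, mr, err);
}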
lib/bufq.h (new file, 261 lines)
@@ -0,0 +1,261 @@
|
||||
#ifndef HEADER_CURL_BUFQ_H
|
||||
#define HEADER_CURL_BUFQ_H
|
||||
/***************************************************************************
|
||||
* _ _ ____ _
|
||||
* Project ___| | | | _ \| |
|
||||
* / __| | | | |_) | |
|
||||
* | (__| |_| | _ <| |___
|
||||
* \___|\___/|_| \_\_____|
|
||||
*
|
||||
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
|
||||
*
|
||||
* This software is licensed as described in the file COPYING, which
|
||||
* you should have received as part of this distribution. The terms
|
||||
* are also available at https://curl.se/docs/copyright.html.
|
||||
*
|
||||
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
|
||||
* copies of the Software, and permit persons to whom the Software is
|
||||
* furnished to do so, under the terms of the COPYING file.
|
||||
*
|
||||
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
|
||||
* KIND, either express or implied.
|
||||
*
|
||||
* SPDX-License-Identifier: curl
|
||||
*
|
||||
***************************************************************************/
|
||||
#include "curl_setup.h"
|
||||
|
||||
#include <curl/curl.h>
|
||||
|
||||
/**
|
||||
* A chunk of bytes for reading and writing.
|
||||
* The size is fixed at creation, with read and write offsets
|
||||
* for where unread content is.
|
||||
*/
|
||||
struct buf_chunk {
|
||||
struct buf_chunk *next; /* to keep it in a list */
|
||||
size_t dlen; /* the amount of allocated x.data[] */
|
||||
size_t r_offset; /* first unread bytes */
|
||||
size_t w_offset; /* one after last written byte */
|
||||
union {
|
||||
unsigned char data[1]; /* the buffer for `dlen` bytes */
|
||||
void *dummy; /* alignment */
|
||||
} x;
|
||||
};
|
||||
|
||||
/**
|
||||
* A pool for providing/keeping a number of chunks of the same size
|
||||
*
|
||||
* The same pool can be shared by many `bufq` instances. However, a pool
|
||||
* is not thread safe. All bufqs using it are supposed to operate in the
|
||||
* same thread.
|
||||
*/
|
||||
struct bufc_pool {
|
||||
struct buf_chunk *spare; /* list of available spare chunks */
|
||||
size_t chunk_size; /* the size of chunks in this pool */
|
||||
size_t spare_count; /* current number of spare chunks in list */
|
||||
size_t spare_max; /* max number of spares to keep */
|
||||
};
|
||||
|
||||
void Curl_bufcp_init(struct bufc_pool *pool,
|
||||
size_t chunk_size, size_t spare_max);
|
||||
|
||||
CURLcode Curl_bufcp_take(struct bufc_pool *pool,
|
||||
struct buf_chunk **pchunk);
|
||||
void Curl_bufcp_put(struct bufc_pool *pool,
|
||||
struct buf_chunk *chunk);
|
||||
|
||||
void Curl_bufcp_free(struct bufc_pool *pool);
|
||||
|
||||
/**
|
||||
* A queue of byte chunks for reading and writing.
|
||||
* Reading is done from `head`, writing is done to `tail`.
|
||||
*
|
||||
* A `bufq` can be empty, full, or neither. Its `len` is the number
|
||||
* of bytes that can be read. For an empty bufq, `len` will be 0.
|
||||
*
|
||||
* By default, a bufq can hold up to `max_chunks * chunk_size` number
|
||||
* of bytes. When `max_chunks` are used (in the `head` list) and the
|
||||
* `tail` chunk is full, the bufq will report that it is full.
|
||||
*
|
||||
* On a full bufq, `len` may be less than the maximum number of bytes,
|
||||
* e.g. when the head chunk is partially read. `len` may also become
|
||||
* larger than the max when option `BUFQ_OPT_SOFT_LIMIT` is used.
|
||||
*
|
||||
* By default, writing to a full bufq will return (-1, CURLE_AGAIN). Same
|
||||
* as reading from an empty bufq.
|
||||
* With `BUFQ_OPT_SOFT_LIMIT` set, a bufq will allow writing beyond this
|
||||
* limit and use more than `max_chunks`. However it will report that it
|
||||
* is full nevertheless. This is provided for situations where writes
|
||||
* preferably never fail (except for memory exhaustion).
|
||||
*
|
||||
* By default and without a pool, a bufq will keep chunks that have been
|
||||
* read empty in its `spare` list. Option `BUFQ_OPT_NO_SPARES` will
|
||||
* disable that and free chunks once they become empty.
|
||||
*
|
||||
* When providing a pool to a bufq, all chunk creation and spare handling
|
||||
* will be delegated to that pool.
|
||||
*/
|
||||
struct bufq {
|
||||
struct buf_chunk *head; /* chunk with bytes to read from */
|
||||
struct buf_chunk *tail; /* chunk to write to */
|
||||
struct buf_chunk *spare; /* list of free chunks, unless `pool` */
|
||||
struct bufc_pool *pool; /* optional pool for free chunks */
|
||||
size_t chunk_count; /* current number of chunks in `head+spare` */
|
||||
size_t max_chunks; /* max `head` chunks to use */
|
||||
size_t chunk_size; /* size of chunks to manage */
|
||||
int opts; /* options for handling queue, see below */
|
||||
};
|
||||
|
||||
/**
|
||||
* Default behaviour: chunk limit is "hard", meaning attempts to write
|
||||
* more bytes than can be held in `max_chunks` are refused and will return
|
||||
* -1, CURLE_AGAIN. */
|
||||
#define BUFQ_OPT_NONE (0)
|
||||
/**
|
||||
* Make `max_chunks` a "soft" limit. A bufq will report that it is "full"
|
||||
* when `max_chunks` are used, but allows writing beyond this limit.
|
||||
*/
|
||||
#define BUFQ_OPT_SOFT_LIMIT (1 << 0)
|
||||
/**
|
||||
* Do not keep spare chunks.
|
||||
*/
|
||||
#define BUFQ_OPT_NO_SPARES (1 << 1)
|
||||
|
||||
/**
|
||||
* Initialize a buffer queue that can hold up to `max_chunks` buffers
|
||||
* each of size `chunk_size`. The bufq will not allow writing of
|
||||
* more bytes than can be held in `max_chunks`.
|
||||
*/
|
||||
void Curl_bufq_init(struct bufq *q, size_t chunk_size, size_t max_chunks);
|
||||
|
||||
/**
|
||||
* Initialize a buffer queue that can hold up to `max_chunks` buffers
|
||||
* each of size `chunk_size` with the given options. See `BUFQ_OPT_*`.
|
||||
*/
|
||||
void Curl_bufq_init2(struct bufq *q, size_t chunk_size,
|
||||
size_t max_chunks, int opts);
|
||||
|
||||
void Curl_bufq_initp(struct bufq *q, struct bufc_pool *pool,
|
||||
size_t max_chunks, int opts);
|
||||
|
||||
/**
|
||||
* Reset the buffer queue to be empty. Will keep any allocated buffer
|
||||
* chunks around.
|
||||
*/
|
||||
void Curl_bufq_reset(struct bufq *q);
|
||||
|
||||
/**
|
||||
* Free all resources held by the buffer queue.
|
||||
*/
|
||||
void Curl_bufq_free(struct bufq *q);
|
||||
|
||||
/**
|
||||
* Return the total amount of data in the queue.
|
||||
*/
|
||||
size_t Curl_bufq_len(const struct bufq *q);
|
||||
|
||||
/**
|
||||
* Return the total amount of free space in the queue.
|
||||
* The returned length is the number of bytes that can
|
||||
* be expected to be written successfully to the bufq,
|
||||
* provided no memory allocations fail.
|
||||
*/
|
||||
size_t Curl_bufq_space(const struct bufq *q);
|
||||
|
||||
/**
|
||||
* Returns TRUE iff there is no data in the buffer queue.
|
||||
*/
|
||||
bool Curl_bufq_is_empty(const struct bufq *q);
|
||||
|
||||
/**
|
||||
* Returns TRUE iff there is no space left in the buffer queue.
|
||||
*/
|
||||
bool Curl_bufq_is_full(const struct bufq *q);
|
||||
|
||||
/**
|
||||
* Write buf to the end of the buffer queue. The buf is copied
|
||||
* and the amount of copied bytes is returned.
|
||||
* A return code of -1 indicates an error, setting `err` to the
|
||||
* cause. An err of CURLE_AGAIN is returned if the buffer queue is full.
|
||||
*/
|
||||
ssize_t Curl_bufq_write(struct bufq *q,
|
||||
const unsigned char *buf, size_t len,
|
||||
CURLcode *err);
|
||||
|
||||
/**
|
||||
* Read buf from the start of the buffer queue. The buf is copied
|
||||
* and the amount of copied bytes is returned.
|
||||
* A return code of -1 indicates an error, setting `err` to the
|
||||
* cause. An err of CURLE_AGAIN is returned if the buffer queue is empty.
|
||||
*/
|
||||
ssize_t Curl_bufq_read(struct bufq *q, unsigned char *buf, size_t len,
|
||||
CURLcode *err);
|
||||
|
||||
/**
|
||||
* Peek at the head chunk in the buffer queue. Returns a pointer to
|
||||
* the chunk buf (at the current offset) and its length. Does not
|
||||
* modify the buffer queue.
|
||||
* Returns TRUE iff bytes are available. Sets `pbuf` to NULL and `plen`
|
||||
* to 0 when no bytes are available.
|
||||
* Repeated calls return the same information until the buffer queue
|
||||
* is modified, see `Curl_bufq_skip()`.
|
||||
*/
|
||||
bool Curl_bufq_peek(const struct bufq *q,
|
||||
const unsigned char **pbuf, size_t *plen);
|
||||
|
||||
bool Curl_bufq_peek_at(const struct bufq *q, size_t offset,
|
||||
const unsigned char **pbuf, size_t *plen);
|
||||
|
||||
/**
|
||||
* Tell the buffer queue to discard `amount` bytes at the head
* of the queue. Skipping more than is currently buffered will
|
||||
* just empty the queue.
|
||||
*/
|
||||
void Curl_bufq_skip(struct bufq *q, size_t amount);
|
||||
|
||||
typedef ssize_t Curl_bufq_writer(void *writer_ctx,
|
||||
const unsigned char *buf, size_t len,
|
||||
CURLcode *err);
|
||||
/**
|
||||
* Passes the chunks in the buffer queue to the writer and returns
|
||||
* the amount of data written. A writer may return -1 and CURLE_AGAIN
|
||||
* to indicate blocking at which point the queue will stop and return
|
||||
* the amount of data passed so far.
|
||||
* -1 is returned on any other errors reported by the writer.
|
||||
* Note that in case of a -1 chunks may have been written and
|
||||
* the buffer queue will have different length than before.
|
||||
*/
|
||||
ssize_t Curl_bufq_pass(struct bufq *q, Curl_bufq_writer *writer,
|
||||
void *writer_ctx, CURLcode *err);
|
||||
|
||||
typedef ssize_t Curl_bufq_reader(void *reader_ctx,
|
||||
unsigned char *buf, size_t len,
|
||||
CURLcode *err);
|
||||
|
||||
/**
|
||||
* Read data and append it to the end of the buffer queue until the
|
||||
* reader returns blocking or the queue is full. A reader returns
|
||||
* -1 and CURLE_AGAIN to indicate blocking.
|
||||
* Returns the total amount of data read (may be 0) or -1 on other
|
||||
* reader errors.
|
||||
* Note that in case of a -1 chunks may have been read and
|
||||
* the buffer queue will have different length than before.
|
||||
*/
|
||||
ssize_t Curl_bufq_slurp(struct bufq *q, Curl_bufq_reader *reader,
|
||||
void *reader_ctx, CURLcode *err);
|
||||
|
||||
|
||||
/**
|
||||
* Write buf to the end of the buffer queue.
|
||||
* Will write bufq content or passed `buf` directly using the `writer`
|
||||
* callback when it sees fit. 'buf' might get passed directly
|
||||
* on or is placed into the buffer, depending on `len` and current
|
||||
* amount buffered, chunk size, etc.
|
||||
*/
|
||||
ssize_t Curl_bufq_write_pass(struct bufq *q,
|
||||
const unsigned char *buf, size_t len,
|
||||
Curl_bufq_writer *writer, void *writer_ctx,
|
||||
CURLcode *err);
|
||||
|
||||
#endif /* HEADER_CURL_BUFQ_H */
|
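To make the pool delegation described above concrete, here is a hedged sketch of one bufc_pool feeding two queues; the sizes, spare limit and function name are arbitrary choices for illustration.

/* Illustrative sketch only, not part of the commit. */
static void pool_demo(void)
{
  struct bufc_pool pool;
  struct bufq in_q, out_q;

  /* 16KB chunks, keep at most 8 spare chunks in the pool */
  Curl_bufcp_init(&pool, 16 * 1024, 8);

  /* both queues draw and return their chunks via the shared pool */
  Curl_bufq_initp(&in_q, &pool, 4, BUFQ_OPT_NONE);
  Curl_bufq_initp(&out_q, &pool, 4, BUFQ_OPT_SOFT_LIMIT);

  /* ... Curl_bufq_write()/Curl_bufq_read() as usual ... */

  Curl_bufq_free(&in_q);
  Curl_bufq_free(&out_q);
  Curl_bufcp_free(&pool);     /* free the pool after its users */
}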
lib/dynhds.c (new file, 303 lines)
@@ -0,0 +1,303 @@
|
||||
/***************************************************************************
|
||||
* _ _ ____ _
|
||||
* Project ___| | | | _ \| |
|
||||
* / __| | | | |_) | |
|
||||
* | (__| |_| | _ <| |___
|
||||
* \___|\___/|_| \_\_____|
|
||||
*
|
||||
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
|
||||
*
|
||||
* This software is licensed as described in the file COPYING, which
|
||||
* you should have received as part of this distribution. The terms
|
||||
* are also available at https://curl.se/docs/copyright.html.
|
||||
*
|
||||
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
|
||||
* copies of the Software, and permit persons to whom the Software is
|
||||
* furnished to do so, under the terms of the COPYING file.
|
||||
*
|
||||
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
|
||||
* KIND, either express or implied.
|
||||
*
|
||||
* SPDX-License-Identifier: curl
|
||||
*
|
||||
***************************************************************************/
|
||||
|
||||
#include "curl_setup.h"
|
||||
#include "dynhds.h"
|
||||
#include "strcase.h"
|
||||
|
||||
/* The last 3 #include files should be in this order */
|
||||
#include "curl_printf.h"
|
||||
#include "curl_memory.h"
|
||||
#include "memdebug.h"
|
||||
|
||||
|
||||
static struct dynhds_entry *
|
||||
entry_new(const char *name, size_t namelen,
|
||||
const char *value, size_t valuelen)
|
||||
{
|
||||
struct dynhds_entry *e;
|
||||
char *p;
|
||||
|
||||
DEBUGASSERT(name);
|
||||
DEBUGASSERT(value);
|
||||
e = calloc(1, sizeof(*e) + namelen + valuelen + 2);
|
||||
if(!e)
|
||||
return NULL;
|
||||
e->name = p = ((char *)e) + sizeof(*e);
|
||||
memcpy(p, name, namelen);
|
||||
e->namelen = namelen;
|
||||
e->value = p += namelen + 1; /* leave a \0 at the end of name */
|
||||
memcpy(p, value, valuelen);
|
||||
e->valuelen = valuelen;
|
||||
return e;
|
||||
}
|
||||
|
||||
static void entry_free(struct dynhds_entry *e)
|
||||
{
|
||||
free(e);
|
||||
}
|
||||
|
||||
void Curl_dynhds_init(struct dynhds *dynhds, size_t max_entries,
|
||||
size_t max_strs_size)
|
||||
{
|
||||
DEBUGASSERT(dynhds);
|
||||
DEBUGASSERT(max_strs_size);
|
||||
dynhds->hds = NULL;
|
||||
dynhds->hds_len = dynhds->hds_allc = dynhds->strs_len = 0;
|
||||
dynhds->max_entries = max_entries;
|
||||
dynhds->max_strs_size = max_strs_size;
|
||||
}
|
||||
|
||||
void Curl_dynhds_free(struct dynhds *dynhds)
|
||||
{
|
||||
DEBUGASSERT(dynhds);
|
||||
if(dynhds->hds && dynhds->hds_len) {
|
||||
size_t i;
|
||||
DEBUGASSERT(dynhds->hds);
|
||||
for(i = 0; i < dynhds->hds_len; ++i) {
|
||||
entry_free(dynhds->hds[i]);
|
||||
}
|
||||
}
|
||||
Curl_safefree(dynhds->hds);
|
||||
dynhds->hds_len = dynhds->hds_allc = dynhds->strs_len = 0;
|
||||
}
|
||||
|
||||
void Curl_dynhds_reset(struct dynhds *dynhds)
|
||||
{
|
||||
DEBUGASSERT(dynhds);
|
||||
if(dynhds->hds_len) {
|
||||
size_t i;
|
||||
DEBUGASSERT(dynhds->hds);
|
||||
for(i = 0; i < dynhds->hds_len; ++i) {
|
||||
entry_free(dynhds->hds[i]);
|
||||
dynhds->hds[i] = NULL;
|
||||
}
|
||||
}
|
||||
dynhds->hds_len = dynhds->strs_len = 0;
|
||||
}
|
||||
|
||||
size_t Curl_dynhds_count(struct dynhds *dynhds)
|
||||
{
|
||||
return dynhds->hds_len;
|
||||
}
|
||||
|
||||
struct dynhds_entry *Curl_dynhds_getn(struct dynhds *dynhds, size_t n)
|
||||
{
|
||||
DEBUGASSERT(dynhds);
|
||||
return (n < dynhds->hds_len)? dynhds->hds[n] : NULL;
|
||||
}
|
||||
|
||||
struct dynhds_entry *Curl_dynhds_get(struct dynhds *dynhds, const char *name,
|
||||
size_t namelen)
|
||||
{
|
||||
size_t i;
|
||||
for(i = 0; i < dynhds->hds_len; ++i) {
|
||||
if(dynhds->hds[i]->namelen == namelen &&
|
||||
strncasecompare(dynhds->hds[i]->name, name, namelen)) {
|
||||
return dynhds->hds[i];
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct dynhds_entry *Curl_dynhds_cget(struct dynhds *dynhds, const char *name)
|
||||
{
|
||||
return Curl_dynhds_get(dynhds, name, strlen(name));
|
||||
}
|
||||
|
||||
bool Curl_dynhds_contains(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen)
|
||||
{
|
||||
return !!Curl_dynhds_get(dynhds, name, namelen);
|
||||
}
|
||||
|
||||
bool Curl_dynhds_ccontains(struct dynhds *dynhds, const char *name)
|
||||
{
|
||||
return Curl_dynhds_contains(dynhds, name, strlen(name));
|
||||
}
|
||||
|
||||
CURLcode Curl_dynhds_add(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen,
|
||||
const char *value, size_t valuelen)
|
||||
{
|
||||
struct dynhds_entry *entry = NULL;
|
||||
CURLcode result = CURLE_OUT_OF_MEMORY;
|
||||
|
||||
DEBUGASSERT(dynhds);
|
||||
if(dynhds->max_entries && dynhds->hds_len >= dynhds->max_entries)
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
if(dynhds->strs_len + namelen + valuelen > dynhds->max_strs_size)
|
||||
return CURLE_OUT_OF_MEMORY;
|
||||
|
||||
entry = entry_new(name, namelen, value, valuelen);
|
||||
if(!entry)
|
||||
goto out;
|
||||
|
||||
if(dynhds->hds_len + 1 >= dynhds->hds_allc) {
|
||||
size_t nallc = dynhds->hds_len + 16;
|
||||
struct dynhds_entry **nhds;
|
||||
|
||||
if(dynhds->max_entries && nallc > dynhds->max_entries)
|
||||
nallc = dynhds->max_entries;
|
||||
|
||||
nhds = calloc(nallc, sizeof(struct dynhds_entry *));
|
||||
if(!nhds)
|
||||
goto out;
|
||||
if(dynhds->hds) {
|
||||
memcpy(nhds, dynhds->hds,
|
||||
dynhds->hds_len * sizeof(struct dynhds_entry *));
|
||||
Curl_safefree(dynhds->hds);
|
||||
}
|
||||
dynhds->hds = nhds;
|
||||
dynhds->hds_allc = nallc;
|
||||
}
|
||||
dynhds->hds[dynhds->hds_len++] = entry;
|
||||
entry = NULL;
|
||||
dynhds->strs_len += namelen + valuelen;
|
||||
result = CURLE_OK;
|
||||
|
||||
out:
|
||||
if(entry)
|
||||
entry_free(entry);
|
||||
return result;
|
||||
}
|
||||
|
||||
CURLcode Curl_dynhds_cadd(struct dynhds *dynhds,
|
||||
const char *name, const char *value)
|
||||
{
|
||||
return Curl_dynhds_add(dynhds, name, strlen(name), value, strlen(value));
|
||||
}
|
||||
|
||||
CURLcode Curl_dynhds_set(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen,
|
||||
const char *value, size_t valuelen)
|
||||
{
|
||||
Curl_dynhds_remove(dynhds, name, namelen);
|
||||
return Curl_dynhds_add(dynhds, name, namelen, value, valuelen);
|
||||
}
|
||||
|
||||
CURLcode Curl_dynhds_cset(struct dynhds *dynhds,
|
||||
const char *name, const char *value)
|
||||
{
|
||||
return Curl_dynhds_set(dynhds, name, strlen(name), value, strlen(value));
|
||||
}
|
||||
|
||||
CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line)
|
||||
{
|
||||
const char *p;
|
||||
const char *name;
|
||||
size_t namelen;
|
||||
const char *value;
|
||||
size_t valuelen;
|
||||
|
||||
if(!line)
|
||||
return CURLE_OK;
|
||||
p = strchr(line, ':');
|
||||
if(!p) {
|
||||
return CURLE_BAD_FUNCTION_ARGUMENT;
|
||||
}
|
||||
|
||||
name = line;
|
||||
namelen = p - line;
|
||||
p++; /* move past the colon */
|
||||
while(ISBLANK(*p))
|
||||
p++;
|
||||
value = p;
|
||||
p = strchr(value, '\r');
|
||||
if(!p)
|
||||
p = strchr(value, '\n');
|
||||
valuelen = p? ((size_t)(p - value)) : strlen(value);
|
||||
|
||||
return Curl_dynhds_add(dynhds, name, namelen, value, valuelen);
|
||||
}
|
||||
|
||||
size_t Curl_dynhds_count_name(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen)
|
||||
{
|
||||
size_t n = 0;
|
||||
if(dynhds->hds_len) {
|
||||
size_t i;
|
||||
for(i = 0; i < dynhds->hds_len; ++i) {
|
||||
if((namelen == dynhds->hds[i]->namelen) &&
|
||||
strncasecompare(name, dynhds->hds[i]->name, namelen))
|
||||
++n;
|
||||
}
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
size_t Curl_dynhds_ccount_name(struct dynhds *dynhds, const char *name)
|
||||
{
|
||||
return Curl_dynhds_count_name(dynhds, name, strlen(name));
|
||||
}
|
||||
|
||||
size_t Curl_dynhds_remove(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen)
|
||||
{
|
||||
size_t n = 0;
|
||||
if(dynhds->hds_len) {
|
||||
size_t i, len;
|
||||
for(i = 0; i < dynhds->hds_len; ++i) {
|
||||
if((namelen == dynhds->hds[i]->namelen) &&
|
||||
strncasecompare(name, dynhds->hds[i]->name, namelen)) {
|
||||
++n;
|
||||
--dynhds->hds_len;
|
||||
dynhds->strs_len -= (dynhds->hds[i]->namelen +
|
||||
dynhds->hds[i]->valuelen);
|
||||
entry_free(dynhds->hds[i]);
|
||||
len = dynhds->hds_len - i; /* remaining entries */
|
||||
if(len) {
|
||||
memmove(&dynhds->hds[i], &dynhds->hds[i + 1],
|
||||
len * sizeof(dynhds->hds[i]));
|
||||
}
|
||||
--i; /* do this index again */
|
||||
}
|
||||
}
|
||||
}
|
||||
return n;
|
||||
}
|
||||
|
||||
size_t Curl_dynhds_cremove(struct dynhds *dynhds, const char *name)
|
||||
{
|
||||
return Curl_dynhds_remove(dynhds, name, strlen(name));
|
||||
}
|
||||
|
||||
CURLcode Curl_dynhds_h1_dprint(struct dynhds *dynhds, struct dynbuf *dbuf)
|
||||
{
|
||||
CURLcode result = CURLE_OK;
|
||||
size_t i;
|
||||
|
||||
if(!dynhds->hds_len)
|
||||
return result;
|
||||
|
||||
for(i = 0; i < dynhds->hds_len; ++i) {
|
||||
result = Curl_dyn_addf(dbuf, "%.*s: %.*s\r\n",
|
||||
(int)dynhds->hds[i]->namelen, dynhds->hds[i]->name,
|
||||
(int)dynhds->hds[i]->valuelen, dynhds->hds[i]->value);
|
||||
if(result)
|
||||
break;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
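A hedged sketch of typical dynhds usage, stitched together from the functions defined above: build a header list, adjust it, and print it in HTTP/1.1 form into a dynbuf. The limits, header values and function name are illustrative; the caller's dynbuf is assumed to be initialized.

/* Illustrative sketch only, not part of the commit. */
static CURLcode dynhds_demo(struct dynbuf *out)
{
  struct dynhds hds;
  CURLcode result;

  /* at most 128 headers and 64KB of name+value bytes in total */
  Curl_dynhds_init(&hds, 128, 64 * 1024);

  result = Curl_dynhds_cadd(&hds, "Host", "example.com");
  if(!result)   /* parse a ready-made HTTP/1.1 header line */
    result = Curl_dynhds_h1_cadd_line(&hds, "Accept: */*\r\n");
  if(!result)   /* replace any existing User-Agent entries */
    result = Curl_dynhds_cset(&hds, "User-Agent", "demo/1.0");

  /* lookups are case-insensitive */
  if(!result && Curl_dynhds_ccontains(&hds, "host"))
    result = Curl_dynhds_h1_dprint(&hds, out); /* "Name: value\r\n" lines */

  Curl_dynhds_free(&hds);
  return result;
}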
lib/dynhds.h (new file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
#ifndef HEADER_CURL_DYNHDS_H
|
||||
#define HEADER_CURL_DYNHDS_H
|
||||
/***************************************************************************
|
||||
* _ _ ____ _
|
||||
* Project ___| | | | _ \| |
|
||||
* / __| | | | |_) | |
|
||||
* | (__| |_| | _ <| |___
|
||||
* \___|\___/|_| \_\_____|
|
||||
*
|
||||
* Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
|
||||
*
|
||||
* This software is licensed as described in the file COPYING, which
|
||||
* you should have received as part of this distribution. The terms
|
||||
* are also available at https://curl.se/docs/copyright.html.
|
||||
*
|
||||
* You may opt to use, copy, modify, merge, publish, distribute and/or sell
|
||||
* copies of the Software, and permit persons to whom the Software is
|
||||
* furnished to do so, under the terms of the COPYING file.
|
||||
*
|
||||
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
|
||||
* KIND, either express or implied.
|
||||
*
|
||||
* SPDX-License-Identifier: curl
|
||||
*
|
||||
***************************************************************************/
|
||||
#include "curl_setup.h"
|
||||
|
||||
#include <curl/curl.h>
|
||||
#include "dynbuf.h"
|
||||
|
||||
struct dynbuf;
|
||||
|
||||
/**
|
||||
* A single header entry.
|
||||
* `name` and `value` are non-NULL and always NUL terminated.
|
||||
*/
|
||||
struct dynhds_entry {
|
||||
char *name;
|
||||
char *value;
|
||||
size_t namelen;
|
||||
size_t valuelen;
|
||||
};
|
||||
|
||||
struct dynhds {
|
||||
struct dynhds_entry **hds;
|
||||
size_t hds_len; /* number of entries in hds */
|
||||
size_t hds_allc; /* size of hds allocation */
|
||||
size_t max_entries; /* size limit number of entries */
|
||||
size_t strs_len; /* length of all strings */
|
||||
size_t max_strs_size; /* max length of all strings */
|
||||
};
|
||||
|
||||
/**
|
||||
* Init for use on first time or after a reset.
|
||||
* Allow `max_entries` headers to be added, 0 for unlimited.
|
||||
* Allow the combined size of all names and values added to not exceed `max_strs_size`.
|
||||
*/
|
||||
void Curl_dynhds_init(struct dynhds *dynhds, size_t max_entries,
|
||||
size_t max_strs_size);
|
||||
/**
|
||||
* Frees all data held in `dynhds`, but not the struct itself.
|
||||
*/
|
||||
void Curl_dynhds_free(struct dynhds *dynhds);
|
||||
|
||||
/**
|
||||
* Reset `dynhds` to its initial state. May keep allocations
|
||||
* around.
|
||||
*/
|
||||
void Curl_dynhds_reset(struct dynhds *dynhds);
|
||||
|
||||
/**
|
||||
* Return the number of header entries.
|
||||
*/
|
||||
size_t Curl_dynhds_count(struct dynhds *dynhds);
|
||||
|
||||
/**
|
||||
* Return the n-th header entry or NULL if it does not exist.
|
||||
*/
|
||||
struct dynhds_entry *Curl_dynhds_getn(struct dynhds *dynhds, size_t n);
|
||||
|
||||
/**
|
||||
* Return the 1st header entry of the name or NULL if none exists.
|
||||
*/
|
||||
struct dynhds_entry *Curl_dynhds_get(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen);
|
||||
struct dynhds_entry *Curl_dynhds_cget(struct dynhds *dynhds, const char *name);
|
||||
|
||||
/**
|
||||
* Return TRUE iff one or more headers with the given name exist.
|
||||
*/
|
||||
bool Curl_dynhds_contains(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen);
|
||||
bool Curl_dynhds_ccontains(struct dynhds *dynhds, const char *name);
|
||||
|
||||
/**
|
||||
* Return how often the given name appears in `dynhds`.
|
||||
* Names are case-insensitive.
|
||||
*/
|
||||
size_t Curl_dynhds_count_name(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen);
|
||||
|
||||
/**
|
||||
* Return how often the given 0-terminated name appears in `dynhds`.
|
||||
* Names are case-insensitive.
|
||||
*/
|
||||
size_t Curl_dynhds_ccount_name(struct dynhds *dynhds, const char *name);
|
||||
|
||||
/**
|
||||
* Add a header, name + value, to `dynhds` at the end. Does *not*
|
||||
* check for duplicate names.
|
||||
*/
|
||||
CURLcode Curl_dynhds_add(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen,
|
||||
const char *value, size_t valuelen);
|
||||
|
||||
/**
|
||||
* Add a header, c-string name + value, to `dynhds` at the end.
|
||||
*/
|
||||
CURLcode Curl_dynhds_cadd(struct dynhds *dynhds,
|
||||
const char *name, const char *value);
|
||||
|
||||
/**
|
||||
* Remove all entries with the given name.
|
||||
* Returns number of entries removed.
|
||||
*/
|
||||
size_t Curl_dynhds_remove(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen);
|
||||
size_t Curl_dynhds_cremove(struct dynhds *dynhds, const char *name);
|
||||
|
||||
/**
|
||||
* Set the given header name and value, replacing any entries with
|
||||
* the same name. The header is added at the end of all (remaining)
|
||||
* entries.
|
||||
*/
|
||||
CURLcode Curl_dynhds_set(struct dynhds *dynhds,
|
||||
const char *name, size_t namelen,
|
||||
const char *value, size_t valuelen);
|
||||
CURLcode Curl_dynhds_cset(struct dynhds *dynhds,
|
||||
const char *name, const char *value);
|
||||
|
||||
/**
|
||||
* Add a single header from a HTTP/1.1 formatted line at the end. Line
|
||||
* may contain a delimiting \r\n or just \n. Any characters after
|
||||
* that will be ignored.
|
||||
*/
|
||||
CURLcode Curl_dynhds_h1_cadd_line(struct dynhds *dynhds, const char *line);
|
||||
|
||||
|
||||
/**
|
||||
* Add the headers to the given `dynbuf` in HTTP/1.1 format with
|
||||
* cr+lf line endings. Will NOT output a last empty line.
|
||||
*/
|
||||
CURLcode Curl_dynhds_h1_dprint(struct dynhds *dynhds, struct dynbuf *dbuf);
|
||||
|
||||
#endif /* HEADER_CURL_DYNHDS_H */
|
lib/http.c (271 changed lines)
@@ -71,6 +71,7 @@
|
||||
#include "url.h"
|
||||
#include "share.h"
|
||||
#include "hostip.h"
|
||||
#include "dynhds.h"
|
||||
#include "http.h"
|
||||
#include "select.h"
|
||||
#include "parsedate.h" /* for the week day and month names */
|
||||
@ -1713,6 +1714,157 @@ CURLcode Curl_http_compile_trailers(struct curl_slist *trailers,
|
||||
return result;
|
||||
}
|
||||
|
||||
static bool hd_name_eq(const char *n1, size_t n1len,
|
||||
const char *n2, size_t n2len)
|
||||
{
|
||||
if(n1len == n2len) {
|
||||
return strncasecompare(n1, n2, n1len);
|
||||
}
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
CURLcode Curl_dynhds_add_custom(struct Curl_easy *data,
|
||||
bool is_connect,
|
||||
struct dynhds *hds)
|
||||
{
|
||||
struct connectdata *conn = data->conn;
|
||||
char *ptr;
|
||||
struct curl_slist *h[2];
|
||||
struct curl_slist *headers;
|
||||
int numlists = 1; /* by default */
|
||||
int i;
|
||||
|
||||
#ifndef CURL_DISABLE_PROXY
|
||||
enum proxy_use proxy;
|
||||
|
||||
if(is_connect)
|
||||
proxy = HEADER_CONNECT;
|
||||
else
|
||||
proxy = conn->bits.httpproxy && !conn->bits.tunnel_proxy?
|
||||
HEADER_PROXY:HEADER_SERVER;
|
||||
|
||||
switch(proxy) {
|
||||
case HEADER_SERVER:
|
||||
h[0] = data->set.headers;
|
||||
break;
|
||||
case HEADER_PROXY:
|
||||
h[0] = data->set.headers;
|
||||
if(data->set.sep_headers) {
|
||||
h[1] = data->set.proxyheaders;
|
||||
numlists++;
|
||||
}
|
||||
break;
|
||||
case HEADER_CONNECT:
|
||||
if(data->set.sep_headers)
|
||||
h[0] = data->set.proxyheaders;
|
||||
else
|
||||
h[0] = data->set.headers;
|
||||
break;
|
||||
}
|
||||
#else
|
||||
(void)is_connect;
|
||||
h[0] = data->set.headers;
|
||||
#endif
|
||||
|
||||
/* loop through one or two lists */
|
||||
for(i = 0; i < numlists; i++) {
|
||||
for(headers = h[i]; headers; headers = headers->next) {
|
||||
const char *name, *value;
|
||||
size_t namelen, valuelen;
|
||||
|
||||
/* There are 2 quirks in place for custom headers:
|
||||
* 1. setting only 'name:' to suppress a header from being sent
|
||||
* 2. setting only 'name;' to send an empty (illegal) header
|
||||
*/
|
||||
ptr = strchr(headers->data, ':');
|
||||
if(ptr) {
|
||||
name = headers->data;
|
||||
namelen = ptr - headers->data;
|
||||
ptr++; /* pass the colon */
|
||||
while(*ptr && ISSPACE(*ptr))
|
||||
ptr++;
|
||||
if(*ptr) {
|
||||
value = ptr;
|
||||
valuelen = strlen(value);
|
||||
}
|
||||
else {
|
||||
/* quirk #1, suppress this header */
|
||||
continue;
|
||||
}
|
||||
}
|
||||
else {
|
||||
ptr = strchr(headers->data, ';');
|
||||
|
||||
if(!ptr) {
|
||||
/* neither : nor ; in provided header value. We seem
|
||||
* to ignore this silently */
|
||||
continue;
|
||||
}
|
||||
|
||||
name = headers->data;
|
||||
namelen = ptr - headers->data;
|
||||
ptr++; /* pass the semicolon */
|
||||
while(*ptr && ISSPACE(*ptr))
|
||||
ptr++;
|
||||
if(!*ptr) {
|
||||
/* quirk #2, send an empty header */
|
||||
value = "";
|
||||
valuelen = 0;
|
||||
}
|
||||
else {
|
||||
/* this may be used for something else in the future,
|
||||
* ignore this for now */
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
DEBUGASSERT(name && value);
|
||||
if(data->state.aptr.host &&
|
||||
/* a Host: header was sent already, don't pass on any custom Host:
|
||||
header as that will produce *two* in the same request! */
|
||||
hd_name_eq(name, namelen, STRCONST("Host:")))
|
||||
;
|
||||
else if(data->state.httpreq == HTTPREQ_POST_FORM &&
|
||||
/* this header (extended by formdata.c) is sent later */
|
||||
hd_name_eq(name, namelen, STRCONST("Content-Type:")))
|
||||
;
|
||||
else if(data->state.httpreq == HTTPREQ_POST_MIME &&
|
||||
/* this header is sent later */
|
||||
hd_name_eq(name, namelen, STRCONST("Content-Type:")))
|
||||
;
|
||||
else if(conn->bits.authneg &&
|
||||
/* while doing auth neg, don't allow the custom length since
|
||||
we will force length zero then */
|
||||
hd_name_eq(name, namelen, STRCONST("Content-Length:")))
|
||||
;
|
||||
else if(data->state.aptr.te &&
|
||||
/* when asking for Transfer-Encoding, don't pass on a custom
|
||||
Connection: */
|
||||
hd_name_eq(name, namelen, STRCONST("Connection:")))
|
||||
;
|
||||
else if((conn->httpversion >= 20) &&
|
||||
hd_name_eq(name, namelen, STRCONST("Transfer-Encoding:")))
|
||||
/* HTTP/2 doesn't support chunked requests */
|
||||
;
|
||||
else if((hd_name_eq(name, namelen, STRCONST("Authorization:")) ||
|
||||
hd_name_eq(name, namelen, STRCONST("Cookie:"))) &&
|
||||
/* be careful of sending this potentially sensitive header to
|
||||
other hosts */
|
||||
!Curl_auth_allowed_to_host(data))
|
||||
;
|
||||
else {
|
||||
CURLcode result;
|
||||
|
||||
result = Curl_dynhds_add(hds, name, namelen, value, valuelen);
|
||||
if(result)
|
||||
return result;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
CURLcode Curl_add_custom_headers(struct Curl_easy *data,
|
||||
bool is_connect,
|
||||
#ifndef USE_HYPER
|
||||
@ -4344,4 +4496,123 @@ CURLcode Curl_http_readwrite_headers(struct Curl_easy *data,
|
||||
return CURLE_OK;
|
||||
}
|
||||
|
||||
|
||||
/* Decode HTTP status code string. */
|
||||
CURLcode Curl_http_decode_status(int *pstatus, const char *s, size_t len)
|
||||
{
|
||||
CURLcode result = CURLE_BAD_FUNCTION_ARGUMENT;
|
||||
int status = 0;
|
||||
int i;
|
||||
|
||||
if(len != 3)
|
||||
goto out;
|
||||
|
||||
for(i = 0; i < 3; ++i) {
|
||||
char c = s[i];
|
||||
|
||||
if(c < '0' || c > '9')
|
||||
goto out;
|
||||
|
||||
status *= 10;
|
||||
status += c - '0';
|
||||
}
|
||||
result = CURLE_OK;
|
||||
out:
|
||||
*pstatus = result? -1 : status;
|
||||
return result;
|
||||
}
|
||||
|
||||
CURLcode Curl_http_req_make(struct http_req **preq,
|
||||
const char *method,
|
||||
const char *scheme,
|
||||
const char *authority,
|
||||
const char *path)
|
||||
{
|
||||
struct http_req *req;
|
||||
CURLcode result = CURLE_OUT_OF_MEMORY;
|
||||
size_t mlen;
|
||||
|
||||
DEBUGASSERT(method);
|
||||
mlen = strlen(method);
|
||||
if(mlen + 1 >= sizeof(req->method))
|
||||
return CURLE_BAD_FUNCTION_ARGUMENT;
|
||||
|
||||
req = calloc(1, sizeof(*req));
|
||||
if(!req)
|
||||
goto out;
|
||||
memcpy(req->method, method, mlen);
|
||||
if(scheme) {
|
||||
req->scheme = strdup(scheme);
|
||||
if(!req->scheme)
|
||||
goto out;
|
||||
}
|
||||
if(authority) {
|
||||
req->authority = strdup(authority);
|
||||
if(!req->authority)
|
||||
goto out;
|
||||
}
|
||||
if(path) {
|
||||
req->path = strdup(path);
|
||||
if(!req->path)
|
||||
goto out;
|
||||
}
|
||||
Curl_dynhds_init(&req->headers, 128, DYN_H2_HEADERS);
|
||||
result = CURLE_OK;
|
||||
|
||||
out:
|
||||
if(result && req)
|
||||
Curl_http_req_free(req);
|
||||
*preq = result? NULL : req;
|
||||
return result;
|
||||
}
|
||||
|
||||
void Curl_http_req_free(struct http_req *req)
|
||||
{
|
||||
if(req) {
|
||||
free(req->scheme);
|
||||
free(req->authority);
|
||||
free(req->path);
|
||||
Curl_dynhds_free(&req->headers);
|
||||
free(req);
|
||||
}
|
||||
}
|
||||
|
||||
CURLcode Curl_http_resp_make(struct http_resp **presp,
|
||||
int status,
|
||||
const char *description)
|
||||
{
|
||||
struct http_resp *resp;
|
||||
CURLcode result = CURLE_OUT_OF_MEMORY;
|
||||
|
||||
resp = calloc(1, sizeof(*resp));
|
||||
if(!resp)
|
||||
goto out;
|
||||
|
||||
resp->status = status;
|
||||
if(description) {
|
||||
resp->description = strdup(description);
|
||||
if(!resp->description)
|
||||
goto out;
|
||||
}
|
||||
Curl_dynhds_init(&resp->headers, 128, DYN_H2_HEADERS);
|
||||
result = CURLE_OK;
|
||||
|
||||
out:
|
||||
if(result && resp)
|
||||
Curl_http_resp_free(resp);
|
||||
*presp = result? NULL : resp;
|
||||
return result;
|
||||
}
|
||||
|
||||
void Curl_http_resp_free(struct http_resp *resp)
|
||||
{
|
||||
if(resp) {
|
||||
free(resp->description);
|
||||
Curl_dynhds_free(&resp->headers);
|
||||
if(resp->prev)
|
||||
Curl_http_resp_free(resp->prev);
|
||||
free(resp);
|
||||
}
|
||||
}
|
||||
|
||||
#endif /* CURL_DISABLE_HTTP */
|
||||
|
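A hedged sketch of the new version independent request object in use: create it, attach a header to its embedded dynhds list, and free it. Method, scheme, authority, path and the header value are placeholders.

/* Illustrative sketch only, not part of the commit. */
static CURLcode req_demo(void)
{
  struct http_req *req = NULL;
  CURLcode result;

  result = Curl_http_req_make(&req, "GET", "https", "example.com", "/");
  if(result)
    return result;

  /* headers live in the dynhds instance inside the struct */
  result = Curl_dynhds_cadd(&req->headers, "accept", "*/*");

  Curl_http_req_free(req);
  return result;
}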
lib/http.h (50 changed lines)
@@ -29,6 +29,7 @@
|
||||
#include <pthread.h>
|
||||
#endif
|
||||
|
||||
#include "dynhds.h"
|
||||
#include "ws.h"
|
||||
|
||||
typedef enum {
|
||||
@ -60,6 +61,7 @@ extern const struct Curl_handler Curl_handler_wss;
|
||||
#endif
|
||||
#endif /* websockets */
|
||||
|
||||
struct dynhds;
|
||||
|
||||
/* Header specific functions */
|
||||
bool Curl_compareheader(const char *headerline, /* line to check */
|
||||
@ -97,6 +99,10 @@ CURLcode Curl_add_custom_headers(struct Curl_easy *data,
|
||||
void *headers
|
||||
#endif
|
||||
);
|
||||
CURLcode Curl_dynhds_add_custom(struct Curl_easy *data,
|
||||
bool is_connect,
|
||||
struct dynhds *hds);
|
||||
|
||||
CURLcode Curl_http_compile_trailers(struct curl_slist *trailers,
|
||||
struct dynbuf *buf,
|
||||
struct Curl_easy *handle);
|
||||
@ -328,4 +334,48 @@ Curl_http_output_auth(struct Curl_easy *data,
|
||||
bool proxytunnel); /* TRUE if this is the request setting
|
||||
up the proxy tunnel */
|
||||
|
||||
/* Decode HTTP status code string. */
|
||||
CURLcode Curl_http_decode_status(int *pstatus, const char *s, size_t len);
|
||||
|
||||
/**
|
||||
* All about a core HTTP request, excluding body and trailers
|
||||
*/
|
||||
struct http_req {
|
||||
char method[12];
|
||||
char *scheme;
|
||||
char *authority;
|
||||
char *path;
|
||||
struct dynhds headers;
|
||||
};
|
||||
|
||||
/**
|
||||
* Create a HTTP request struct.
|
||||
*/
|
||||
CURLcode Curl_http_req_make(struct http_req **preq,
|
||||
const char *method,
|
||||
const char *scheme,
|
||||
const char *authority,
|
||||
const char *path);
|
||||
|
||||
void Curl_http_req_free(struct http_req *req);
|
||||
|
||||
/**
|
||||
* All about a core HTTP response, excluding body and trailers
|
||||
*/
|
||||
struct http_resp {
|
||||
int status;
|
||||
char *description;
|
||||
struct dynhds headers;
|
||||
struct http_resp *prev;
|
||||
};
|
||||
|
||||
/**
|
||||
* Create a HTTP response struct.
|
||||
*/
|
||||
CURLcode Curl_http_resp_make(struct http_resp **presp,
|
||||
int status,
|
||||
const char *description);
|
||||
|
||||
void Curl_http_resp_free(struct http_resp *resp);
|
||||
|
||||
#endif /* HEADER_CURL_HTTP_H */
|
||||
|
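For reference, a small hedged sketch of the status decoder declared above: it accepts exactly three ASCII digits, otherwise it returns CURLE_BAD_FUNCTION_ARGUMENT and sets the status to -1. The wrapper function name is illustrative.

/* Illustrative sketch only, not part of the commit. */
static void status_demo(void)
{
  int status;

  if(!Curl_http_decode_status(&status, "404", 3)) {
    /* success: status == 404 */
  }
  if(Curl_http_decode_status(&status, "40x", 3)) {
    /* failure: status == -1 */
  }
}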
@@ -250,7 +250,7 @@ test2400 test2401 test2402 test2403 \
 \
 test2500 test2501 test2502 test2503 \
 \
-test2600 \
+test2600 test2601 test2602 \
 \
 test3000 test3001 test3002 test3003 test3004 test3005 test3006 test3007 \
 test3008 test3009 test3010 test3011 test3012 test3013 test3014 test3015 \
tests/data/test2601 (new file, 22 lines)
@@ -0,0 +1,22 @@
<testcase>
<info>
<keywords>
unittest
bufq
</keywords>
</info>

#
# Client-side
<client>
<server>
none
</server>
<features>
unittest
</features>
<name>
bufq unit tests
</name>
</client>
</testcase>
tests/data/test2602 (new file, 22 lines)
@@ -0,0 +1,22 @@
<testcase>
<info>
<keywords>
unittest
dynhds
</keywords>
</info>

#
# Client-side
<client>
<server>
none
</server>
<features>
unittest
</features>
<name>
dynhds unit tests
</name>
</client>
</testcase>
@@ -38,7 +38,7 @@ include_directories(
 # or else they will fail to link. Some of the tests require the special libcurlu
 # build, so filter those out until we get libcurlu.
 list(FILTER UNITPROGS EXCLUDE REGEX
-  "unit1394|unit1395|unit1604|unit1608|unit1621|unit1650|unit1653|unit1655|unit1660|unit2600")
+  "unit1394|unit1395|unit1604|unit1608|unit1621|unit1650|unit1653|unit1655|unit1660|unit2600|unit2601|unit2602")
 if(NOT BUILD_SHARED_LIBS)
   foreach(_testfile ${UNITPROGS})
     add_executable(${_testfile} EXCLUDE_FROM_ALL ${_testfile}.c ${UNITFILES})
@@ -154,4 +154,8 @@ unit1661_SOURCES = unit1661.c $(UNITFILES)

 unit2600_SOURCES = unit2600.c $(UNITFILES)

+unit2601_SOURCES = unit2601.c $(UNITFILES)
+
+unit2602_SOURCES = unit2602.c $(UNITFILES)
+
 unit3200_SOURCES = unit3200.c $(UNITFILES)
@@ -38,5 +38,5 @@ UNITPROGS = unit1300 unit1302 unit1303 unit1304 unit1305 unit1307 \
 unit1620 unit1621 \
 unit1650 unit1651 unit1652 unit1653 unit1654 unit1655 \
 unit1660 unit1661 \
-unit2600 \
+unit2600 unit2601 unit2602 \
 unit3200
246
tests/unit/unit2601.c
Normal file
@ -0,0 +1,246 @@
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/
#include "curlcheck.h"

#include "urldata.h"
#include "bufq.h"
#include "curl_log.h"

static CURLcode unit_setup(void)
{
  CURLcode res = CURLE_OK;
  return res;
}

static void unit_stop(void)
{
}

static const char *tail_err(struct bufq *q)
{
  struct buf_chunk *chunk;

  if(!q->tail) {
    return q->head? "tail is NULL, but head is not" : NULL;
  }

  chunk = q->head;
  while(chunk) {
    if(chunk == q->tail) {
      if(chunk->next) {
        return "tail points to queue, but not at the end";
      }
      return NULL;
    }
    chunk = chunk->next;
  }
  return "tail not part of queue";
}

static void dump_bufq(struct bufq *q, const char *msg)
{
  struct buf_chunk *chunk;
  const char *terr;
  size_t n;

  fprintf(stderr, "bufq[chunk_size=%zu, max_chunks=%zu] %s\n",
          q->chunk_size, q->max_chunks, msg);
  fprintf(stderr, "- queue[\n");
  chunk = q->head;
  while(chunk) {
    fprintf(stderr, "   chunk[len=%zu, roff=%zu, woff=%zu]\n",
            chunk->dlen, chunk->r_offset, chunk->w_offset);
    chunk = chunk->next;
  }
  fprintf(stderr, "  ]\n");
  terr = tail_err(q);
  fprintf(stderr, "- tail: %s\n", terr? terr : "ok");
  n = 0;
  chunk = q->spare;
  while(chunk) {
    ++n;
    chunk = chunk->next;
  }
  fprintf(stderr, "- chunks: %zu\n", q->chunk_count);
  fprintf(stderr, "- spares: %zu\n", n);
}

static unsigned char test_data[32*1024];

static void check_bufq(size_t pool_spares,
                       size_t chunk_size, size_t max_chunks,
                       size_t wsize, size_t rsize, int opts)
{
  struct bufq q;
  struct bufc_pool pool;
  size_t max_len = chunk_size * max_chunks;
  CURLcode result;
  ssize_t n, i;
  size_t nwritten, nread;

  if(pool_spares > 0) {
    Curl_bufcp_init(&pool, chunk_size, pool_spares);
    Curl_bufq_initp(&q, &pool, max_chunks, opts);
  }
  else {
    Curl_bufq_init2(&q, chunk_size, max_chunks, opts);
  }

  fail_unless(q.chunk_size == chunk_size, "chunk_size init wrong");
  fail_unless(q.max_chunks == max_chunks, "max_chunks init wrong");
  fail_unless(q.head == NULL, "init: head not NULL");
  fail_unless(q.tail == NULL, "init: tail not NULL");
  fail_unless(q.spare == NULL, "init: spare not NULL");
  fail_unless(Curl_bufq_len(&q) == 0, "init: bufq length != 0");

  n = Curl_bufq_write(&q, test_data, wsize, &result);
  fail_unless(n >= 0, "write: negative size returned");
  fail_unless((size_t)n <= wsize, "write: wrong size returned");
  fail_unless(result == CURLE_OK, "write: wrong result returned");

  /* write empty bufq full */
  nwritten = 0;
  Curl_bufq_reset(&q);
  while(!Curl_bufq_is_full(&q)) {
    n = Curl_bufq_write(&q, test_data, wsize, &result);
    if(n >= 0) {
      nwritten += (size_t)n;
    }
    else if(result != CURLE_AGAIN) {
      fail_unless(result == CURLE_AGAIN, "write-loop: unexpected result");
      break;
    }
  }
  if(nwritten != max_len) {
    fprintf(stderr, "%zu bytes written, but max_len=%zu\n",
            nwritten, max_len);
    dump_bufq(&q, "after writing full");
    fail_if(TRUE, "write: bufq full but nwritten wrong");
  }

  /* read full bufq empty */
  nread = 0;
  while(!Curl_bufq_is_empty(&q)) {
    n = Curl_bufq_read(&q, test_data, rsize, &result);
    if(n >= 0) {
      nread += (size_t)n;
    }
    else if(result != CURLE_AGAIN) {
      fail_unless(result == CURLE_AGAIN, "read-loop: unexpected result");
      break;
    }
  }
  if(nread != max_len) {
    fprintf(stderr, "%zu bytes read, but max_len=%zu\n",
            nread, max_len);
    dump_bufq(&q, "after reading empty");
    fail_if(TRUE, "read: bufq empty but nread wrong");
  }
  if(q.tail) {
    dump_bufq(&q, "after reading empty");
    fail_if(TRUE, "read empty, but tail is not NULL");
  }

  for(i = 0; i < 1000; ++i) {
    n = Curl_bufq_write(&q, test_data, wsize, &result);
    if(n < 0 && result != CURLE_AGAIN) {
      fail_unless(result == CURLE_AGAIN, "rw-loop: unexpected write result");
      break;
    }
    n = Curl_bufq_read(&q, test_data, rsize, &result);
    if(n < 0 && result != CURLE_AGAIN) {
      fail_unless(result == CURLE_AGAIN, "rw-loop: unexpected read result");
      break;
    }
  }

  /* Test SOFT_LIMIT option */
  Curl_bufq_free(&q);
  Curl_bufq_init2(&q, chunk_size, max_chunks, (opts|BUFQ_OPT_SOFT_LIMIT));
  nwritten = 0;
  while(!Curl_bufq_is_full(&q)) {
    n = Curl_bufq_write(&q, test_data, wsize, &result);
    if(n < 0 || (size_t)n != wsize) {
      fail_unless(n > 0 && (size_t)n == wsize, "write should be complete");
      break;
    }
    nwritten += (size_t)n;
  }
  if(nwritten < max_len) {
    fprintf(stderr, "%zu bytes written, but max_len=%zu\n",
            nwritten, max_len);
    dump_bufq(&q, "after writing full");
    fail_if(TRUE, "write: bufq full but nwritten wrong");
  }
  /* do one more write on a full bufq, should work */
  n = Curl_bufq_write(&q, test_data, wsize, &result);
  fail_unless(n > 0 && (size_t)n == wsize, "write should be complete");
  nwritten += (size_t)n;
  /* see that we get all out again */
  nread = 0;
  while(!Curl_bufq_is_empty(&q)) {
    n = Curl_bufq_read(&q, test_data, rsize, &result);
    if(n <= 0) {
      fail_unless(n > 0, "read-loop: unexpected fail");
      break;
    }
    nread += (size_t)n;
  }
  fail_unless(nread == nwritten, "did not get the same out as put in");

  dump_bufq(&q, "at end of test");
  Curl_bufq_free(&q);
  if(pool_spares > 0)
    Curl_bufcp_free(&pool);
}

UNITTEST_START
  struct bufq q;
  ssize_t n;
  CURLcode result;
  unsigned char buf[16*1024];

  Curl_bufq_init(&q, 8*1024, 12);
  n = Curl_bufq_read(&q, buf, 128, &result);
  fail_unless(n < 0 && result == CURLE_AGAIN, "read empty fail");
  Curl_bufq_free(&q);

  check_bufq(0, 1024, 4, 128, 128, BUFQ_OPT_NONE);
  check_bufq(0, 1024, 4, 129, 127, BUFQ_OPT_NONE);
  check_bufq(0, 1024, 4, 2000, 16000, BUFQ_OPT_NONE);
  check_bufq(0, 1024, 4, 16000, 3000, BUFQ_OPT_NONE);

  check_bufq(0, 8000, 10, 1234, 1234, BUFQ_OPT_NONE);
  check_bufq(0, 8000, 10, 8*1024, 4*1024, BUFQ_OPT_NONE);

  check_bufq(0, 1024, 4, 128, 128, BUFQ_OPT_NO_SPARES);
  check_bufq(0, 1024, 4, 129, 127, BUFQ_OPT_NO_SPARES);
  check_bufq(0, 1024, 4, 2000, 16000, BUFQ_OPT_NO_SPARES);
  check_bufq(0, 1024, 4, 16000, 3000, BUFQ_OPT_NO_SPARES);

  check_bufq(8, 1024, 4, 128, 128, BUFQ_OPT_NONE);
  check_bufq(8, 8000, 10, 1234, 1234, BUFQ_OPT_NONE);
  check_bufq(8, 1024, 4, 129, 127, BUFQ_OPT_NO_SPARES);

UNITTEST_STOP
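The loops in check_bufq() above mirror how a caller is expected to drive a bufq: write until it reports CURLE_AGAIN, drain it with reads, and treat CURLE_AGAIN as back-pressure rather than a hard error. A hedged sketch using only the calls that appear in this unit test (chunk and buffer sizes are arbitrary):

/* Hypothetical sketch, not part of the commit: pump bytes from a caller's
 * buffer through a bufq, treating CURLE_AGAIN as "queue full" or
 * "queue empty" rather than as a failure. */
#include "bufq.h"

static CURLcode pump_example(const unsigned char *src, size_t srclen)
{
  struct bufq q;
  unsigned char out[512];
  CURLcode result = CURLE_OK;
  ssize_t n;
  size_t offset = 0;

  Curl_bufq_init(&q, 1024, 4);       /* 4 chunks of 1024 bytes each */

  while(offset < srclen) {
    n = Curl_bufq_write(&q, src + offset, srclen - offset, &result);
    if(n > 0)
      offset += (size_t)n;
    else if(result != CURLE_AGAIN)   /* a full queue is not fatal */
      break;

    do {                             /* drain whatever is buffered */
      n = Curl_bufq_read(&q, out, sizeof(out), &result);
    } while(n > 0);
    if(n < 0 && result != CURLE_AGAIN)
      break;
    result = CURLE_OK;
  }
  Curl_bufq_free(&q);                /* drops anything still buffered */
  return result;
}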
125
tests/unit/unit2602.c
Normal file
@ -0,0 +1,125 @@
/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at https://curl.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 * SPDX-License-Identifier: curl
 *
 ***************************************************************************/
#include "curlcheck.h"

#include "urldata.h"
#include "dynbuf.h"
#include "dynhds.h"
#include "curl_log.h"

static CURLcode unit_setup(void)
{
  return CURLE_OK;
}

static void unit_stop(void)
{
}


UNITTEST_START

  struct dynhds hds;
  struct dynbuf dbuf;
  CURLcode result;
  size_t i;

  /* add 1 more header than allowed */
  Curl_dynhds_init(&hds, 2, 128);
  fail_if(Curl_dynhds_count(&hds), "should be empty");
  fail_if(Curl_dynhds_add(&hds, "test1", 5, "123", 3), "add failed");
  fail_if(Curl_dynhds_add(&hds, "test2", 5, "456", 3), "add failed");
  /* remove and add without exceeding limits */
  for(i = 0; i < 100; ++i) {
    if(Curl_dynhds_remove(&hds, "test2", 5) != 1) {
      fail_if(TRUE, "should");
      break;
    }
    if(Curl_dynhds_add(&hds, "test2", 5, "456", 3)) {
      fail_if(TRUE, "add failed");
      break;
    }
  }
  fail_unless(Curl_dynhds_count(&hds) == 2, "should hold 2");
  /* set, replacing previous entry without exceeding limits */
  for(i = 0; i < 100; ++i) {
    if(Curl_dynhds_set(&hds, "test2", 5, "456", 3)) {
      fail_if(TRUE, "add failed");
      break;
    }
  }
  fail_unless(Curl_dynhds_count(&hds) == 2, "should hold 2");
  /* exceed limit on # of entries */
  result = Curl_dynhds_add(&hds, "test3", 5, "789", 3);
  fail_unless(result, "add should have failed");

  fail_unless(Curl_dynhds_count_name(&hds, "test", 4) == 0, "false positive");
  fail_unless(Curl_dynhds_count_name(&hds, "test1", 4) == 0, "false positive");
  fail_if(Curl_dynhds_get(&hds, "test1", 4), "false positive");
  fail_unless(Curl_dynhds_get(&hds, "test1", 5), "false negative");
  fail_unless(Curl_dynhds_count_name(&hds, "test1", 5) == 1, "should");
  fail_unless(Curl_dynhds_ccount_name(&hds, "test2") == 1, "should");
  fail_unless(Curl_dynhds_cget(&hds, "test2"), "should");
  fail_unless(Curl_dynhds_ccount_name(&hds, "TEST2") == 1, "should");
  fail_unless(Curl_dynhds_ccontains(&hds, "TesT2"), "should");
  fail_unless(Curl_dynhds_contains(&hds, "TeSt2", 5), "should");
  Curl_dynhds_free(&hds);

  /* add header exceeding max overall length */
  Curl_dynhds_init(&hds, 128, 10);
  fail_if(Curl_dynhds_add(&hds, "test1", 5, "123", 3), "add failed");
  fail_unless(Curl_dynhds_add(&hds, "test2", 5, "456", 3), "should fail");
  fail_if(Curl_dynhds_add(&hds, "t", 1, "1", 1), "add failed");
  Curl_dynhds_reset(&hds);
  Curl_dynhds_free(&hds);

  Curl_dynhds_init(&hds, 128, 4*1024);
  fail_if(Curl_dynhds_add(&hds, "test1", 5, "123", 3), "add failed");
  fail_if(Curl_dynhds_add(&hds, "test1", 5, "123", 3), "add failed");
  fail_if(Curl_dynhds_cadd(&hds, "blablabla", "thingies"), "add failed");
  fail_if(Curl_dynhds_h1_cadd_line(&hds, "blablabla: thingies"), "add failed");
  fail_unless(Curl_dynhds_ccount_name(&hds, "blablabla") == 2, "should");
  fail_unless(Curl_dynhds_cremove(&hds, "blablabla") == 2, "should");
  fail_if(Curl_dynhds_ccontains(&hds, "blablabla"), "should not");

  result = Curl_dynhds_h1_cadd_line(&hds, "blablabla thingies");
  fail_unless(result, "add should have failed");
  if(!result) {
    fail_unless(Curl_dynhds_ccount_name(&hds, "bLABlaBlA") == 0, "should");
    fail_if(Curl_dynhds_cadd(&hds, "Bla-Bla", "thingies"), "add failed");

    Curl_dyn_init(&dbuf, 32*1024);
    fail_if(Curl_dynhds_h1_dprint(&hds, &dbuf), "h1 print failed");
    if(Curl_dyn_ptr(&dbuf)) {
      fprintf(stderr, "%s", Curl_dyn_ptr(&dbuf));
      fail_if(strcmp(Curl_dyn_ptr(&dbuf),
                     "test1: 123\r\ntest1: 123\r\nBla-Bla: thingies\r\n"),
              "h1 format differs");
    }
    Curl_dyn_free(&dbuf);
  }

  Curl_dynhds_free(&hds);

UNITTEST_STOP
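The final block of this test is also the shortest recipe for the HTTP/1.1 formatting path: fill a dynhds list, print it into a dynbuf, then use the resulting string. A hedged sketch limited to calls that appear in this unit test (the entry/size limits and header values are made up; `Curl_dynhds_h1_dprint` is assumed to return a CURLcode, as the fail_if check above suggests):

/* Hypothetical sketch, not part of the commit: format a small header list
 * as HTTP/1.1 field lines using the dynhds/dynbuf calls exercised above. */
#include <stdio.h>
#include "dynhds.h"
#include "dynbuf.h"

static CURLcode print_headers_example(void)
{
  struct dynhds hds;
  struct dynbuf dbuf;
  CURLcode result;

  Curl_dynhds_init(&hds, 128, 4*1024);   /* max 128 entries, 4KB total */
  result = Curl_dynhds_cadd(&hds, "Host", "example.com");
  if(!result)
    result = Curl_dynhds_h1_cadd_line(&hds, "Accept: text/html");
  if(!result) {
    Curl_dyn_init(&dbuf, 32*1024);
    result = Curl_dynhds_h1_dprint(&hds, &dbuf);
    if(!result && Curl_dyn_ptr(&dbuf))
      /* with the formatting checked in the test above, this prints
         "Host: example.com\r\nAccept: text/html\r\n" */
      fprintf(stderr, "%s", Curl_dyn_ptr(&dbuf));
    Curl_dyn_free(&dbuf);
  }
  Curl_dynhds_free(&hds);
  return result;
}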