QUIC Send Stream Management

Reviewed-by: Matt Caswell <matt@openssl.org>
Reviewed-by: Tomas Mraz <tomas@openssl.org>
(Merged from https://github.com/openssl/openssl/pull/19159)
Hugo Landau 2022-09-06 13:23:29 +01:00 committed by Tomas Mraz
parent 928f15e71b
commit 8302259013
10 changed files with 1642 additions and 387 deletions

View File

@ -0,0 +1,270 @@
/*
* Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#ifndef OSSL_INTERNAL_QUIC_STREAM_H
# define OSSL_INTERNAL_QUIC_STREAM_H
# pragma once
#include "internal/e_os.h"
#include "internal/time.h"
#include "internal/quic_types.h"
#include "internal/quic_wire.h"
#include "internal/quic_record_tx.h"
/*
* QUIC Send Stream
* ================
*
* The QUIC Send Stream Manager (QUIC_SSTREAM) is responsible for:
*
* - accepting octet strings of stream data;
*
* - generating corresponding STREAM frames;
*
* - receiving notifications of lost frames, in order to generate new STREAM
* frames for the lost data;
*
* - receiving notifications of acknowledged frames, in order to internally
* reuse memory used to store acknowledged stream data;
*
* - informing the caller of how much more stream data it can accept into
* its internal buffers, so as to ensure that the amount of unacknowledged
* data which can be written to a stream is not infinite and to allow the
* caller to manifest backpressure conditions to the user.
*
* The QUIC_SSTREAM is instantiated once for every stream with a send component
* (i.e., for a unidirectional send stream or for the send component of a
* bidirectional stream).
*
* Note: The terms 'TX' and 'RX' are used when referring to frames, packets and
* datagrams. The terms 'send' and 'receive' are used when referring to the
* stream abstraction. Applications send; we transmit.
*/
typedef struct quic_sstream_st QUIC_SSTREAM;
/*
* Instantiates a new QUIC_SSTREAM. init_buf_size specifies the initial size of
* the stream data buffer in bytes, which must be positive.
*/
QUIC_SSTREAM *ossl_quic_sstream_new(size_t init_buf_size);
/*
* Frees a QUIC_SSTREAM and associated stream data storage.
*
* Any iovecs returned by ossl_quic_sstream_get_stream_frame cease to be valid after
* calling this function.
*/
void ossl_quic_sstream_free(QUIC_SSTREAM *qss);
/*
* (For TX packetizer use.) Retrieves information about application stream data
* which is ready for transmission.
*
* *hdr is filled with the logical offset, maximum possible length of stream
* data which can be transmitted, and a pointer to the stream data to be
* transmitted. is_fin is set to 1 if hdr->offset + hdr->len is the final size
* of the stream and 0 otherwise. hdr->stream_id is not set; the caller must set
* it.
*
* The caller is not obligated to send all of the data. If the caller does not
* send all of the data, the caller must reduce hdr->len before serializing the
* header structure and must ensure that hdr->is_fin is cleared.
*
* hdr->has_explicit_len is always set. It is the caller's responsibility to
* clear this if it wants to use the optimization of omitting the length field,
* as only the caller can know when this optimization can be performed.
*
* *num_iov must be set to the size of the iov array at call time. When this
* function returns successfully, it is updated to the number of iov entries
* which have been written.
*
* The stream data may be split across up to two IOVs due to internal ring
* buffer organisation. The sum of the lengths of the IOVs and the value written
* to hdr->len will always match. If the caller decides to send less than
* hdr->len of stream data, it must adjust the IOVs accordingly. This may be
* done by updating hdr->len and then calling the utility function
* ossl_quic_sstream_adjust_iov().
*
* After committing one or more bytes returned by ossl_quic_sstream_get_stream_frame to a
* packet, call ossl_quic_sstream_mark_transmitted with the inclusive range of logical
* byte numbers of the transmitted bytes (i.e., hdr->offset, hdr->offset +
* hdr->len - 1). If you do not call ossl_quic_sstream_mark_transmitted, the next call to
* ossl_quic_sstream_get_stream_frame will return the same data (or potentially the same
* and more, if more data has been appended by the application).
*
* It is the caller's responsibility to clamp the length of data which this
* function indicates is available according to other concerns, such as
* stream-level flow control, connection-level flow control, or the applicable
* maximum datagram payload length (MDPL) for a packet under construction.
*
* The skip argument can usually be given as zero. If it is non-zero, the first
* 'skip' ranges which would otherwise be returned are skipped over; that is,
* the function returns the range which would be output if it were called after
* marking each of those preceding ranges as transmitted. This may be useful for
* callers which wish to enumerate available stream frames and batch their calls
* to ossl_quic_sstream_mark_transmitted() at a later time.
*
* On success, this function will never write *num_iov with a value other than
* 0, 1 or 2. A *num_iov value of 0 can only occur when hdr->is_fin is set (for
* example, when a stream is closed after all existing data has been sent, and
* without sending any more data); otherwise the function returns 0 as there is
* nothing useful to report.
*
* Returns 1 on success and 0 if there is no stream data available for
* transmission, or on other error (such as if the caller provides fewer
* than two IOVs).
*/
int ossl_quic_sstream_get_stream_frame(QUIC_SSTREAM *qss,
size_t skip,
OSSL_QUIC_FRAME_STREAM *hdr,
OSSL_QTX_IOVEC *iov,
size_t *num_iov);
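
To make the calling convention above concrete, here is a minimal sketch of how a TX packetizer might drive this API. It is an illustration only and not part of the header; the function name emit_one_stream_frame and the limit parameter (standing in for flow control and MDPL clamping) are hypothetical.

#include "internal/quic_stream.h"

/*
 * Hypothetical sketch only: pull one STREAM frame's worth of data from qss,
 * clamp it to 'limit' bytes, serialize it (elided), and record the range as
 * transmitted. Returns 1 if a frame was produced, 0 otherwise.
 */
static int emit_one_stream_frame(QUIC_SSTREAM *qss, uint64_t stream_id,
                                 uint64_t limit)
{
    OSSL_QUIC_FRAME_STREAM hdr;
    OSSL_QTX_IOVEC iov[2];
    size_t num_iov = sizeof(iov) / sizeof(iov[0]);

    if (!ossl_quic_sstream_get_stream_frame(qss, 0, &hdr, iov, &num_iov))
        return 0;

    hdr.stream_id = stream_id;            /* caller must set this */

    if (hdr.len > limit) {
        hdr.len = limit;                  /* sending less than was offered */
        hdr.is_fin = 0;                   /* FIN is only valid at the final size */
        ossl_quic_sstream_adjust_iov((size_t)hdr.len, iov, num_iov);
    }

    /* ... serialize hdr and the iov contents into the packet here ... */

    if (hdr.len > 0
        && !ossl_quic_sstream_mark_transmitted(qss, hdr.offset,
                                               hdr.offset + hdr.len - 1))
        return 0;
    if (hdr.is_fin
        && !ossl_quic_sstream_mark_transmitted_fin(qss, hdr.offset + hdr.len))
        return 0;
    return 1;
}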
/*
* (For TX packetizer use.) Marks a logical range of the send stream as having
* been transmitted.
*
* 0 denotes the first byte ever sent on the stream. The start and end values
* are both inclusive, therefore all calls to this function always mark at least
* one byte as being transmitted; if no bytes have been transmitted, do not call
* this function.
*
* If the STREAM frame sent had the FIN bit set, you must also call
* ossl_quic_sstream_mark_transmitted_fin() after calling this function.
*
* If you sent a zero-length STREAM frame with the FIN bit set, you need only
* call ossl_quic_sstream_mark_transmitted_fin() and must not call this function.
*
* Returns 1 on success and 0 on error (e.g. if end < start).
*/
int ossl_quic_sstream_mark_transmitted(QUIC_SSTREAM *qss,
uint64_t start,
uint64_t end);
/*
* (For TX packetizer use.) Marks a STREAM frame with the FIN bit set as having
* been transmitted. final_size is the final size of the stream (i.e., the value
* offset + len of the transmitted STREAM frame).
*
* This function fails returning 0 if ossl_quic_sstream_fin() has not been called or if
* final_size is not correct. The final_size argument is not strictly needed by
* the QUIC_SSTREAM but is required as a sanity check.
*/
int ossl_quic_sstream_mark_transmitted_fin(QUIC_SSTREAM *qss,
uint64_t final_size);
/*
* (RX/ACKM use.) Marks a logical range of the send stream as having been lost.
* The send stream will return the lost data for retransmission on a future call
* to ossl_quic_sstream_get_stream_frame. The start and end values denote logical byte
* numbers and are inclusive.
*
* If the lost frame had the FIN bit set, you must also call
* ossl_quic_sstream_mark_lost_fin() after calling this function.
*
* Returns 1 on success and 0 on error (e.g. if end < start).
*/
int ossl_quic_sstream_mark_lost(QUIC_SSTREAM *qss,
uint64_t start,
uint64_t end);
/*
* (RX/ACKM use.) Informs the QUIC_SSTREAM that a STREAM frame with the FIN bit
* set was lost.
*
* Returns 1 on success and 0 on error.
*/
int ossl_quic_sstream_mark_lost_fin(QUIC_SSTREAM *qss);
/*
* (RX/ACKM use.) Marks a logical range of the send stream as having been
* acknowledged, meaning that the storage for the data in that range of the
* stream can now be recycled and neither that logical range of the stream nor
* any subset of it can be retransmitted again. The start and end values are
* inclusive.
*
* If the acknowledged frame had the FIN bit set, you must also call
* ossl_quic_sstream_mark_acked_fin() after calling this function.
*
* Returns 1 on success and 0 on error (e.g. if end < start).
*/
int ossl_quic_sstream_mark_acked(QUIC_SSTREAM *qss,
uint64_t start,
uint64_t end);
/*
* (RX/ACKM use.) Informs the QUIC_SSTREAM that a STREAM frame with the FIN bit
* set was acknowledged.
*
* Returns 1 on success and 0 on error.
*/
int ossl_quic_sstream_mark_acked_fin(QUIC_SSTREAM *qss);
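
The ACK/loss notification functions above are typically driven from ACK manager callbacks. The following sketch is illustrative only; the sent_stream_frame_info record and the callback names are assumptions made for the example.

#include "internal/quic_stream.h"

/* Hypothetical per-frame record kept by the packetizer for ACKM callbacks. */
struct sent_stream_frame_info {
    QUIC_SSTREAM *qss;
    uint64_t start, end;   /* inclusive logical byte range; valid if len > 0 */
    uint64_t len;
    int has_fin;
};

/* Called when the packet carrying the frame is acknowledged. */
static int on_stream_frame_acked(const struct sent_stream_frame_info *info)
{
    if (info->len > 0
        && !ossl_quic_sstream_mark_acked(info->qss, info->start, info->end))
        return 0;
    if (info->has_fin && !ossl_quic_sstream_mark_acked_fin(info->qss))
        return 0;
    return 1;
}

/* Called when the packet carrying the frame is deemed lost. */
static int on_stream_frame_lost(const struct sent_stream_frame_info *info)
{
    if (info->len > 0
        && !ossl_quic_sstream_mark_lost(info->qss, info->start, info->end))
        return 0;
    if (info->has_fin && !ossl_quic_sstream_mark_lost_fin(info->qss))
        return 0;
    return 1;
}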
/*
* (Front end use.) Appends user data to the stream. The data is copied into the
* stream. The amount of data consumed from buf is written to *consumed on
* success (short writes are possible). The amount of data which can be written
* can be determined in advance by calling the ossl_quic_sstream_get_buffer_avail()
* function; data is copied into an internal ring buffer of finite size.
*
* If the buffer is full, this should be materialised as a backpressure
* condition by the front end. This is not considered a failure condition;
* *consumed is written as 0 and the function returns 1.
*
* Returns 1 on success or 0 on failure.
*/
int ossl_quic_sstream_append(QUIC_SSTREAM *qss,
const unsigned char *buf,
size_t buf_len,
size_t *consumed);
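
A short sketch of how a front end might surface a zero-length write as backpressure. Illustration only; the app_write helper and its return codes are invented here.

#include "internal/quic_stream.h"

#define APP_WRITE_OK          1   /* some or all data accepted */
#define APP_WRITE_WANT_WRITE  0   /* buffer full; retry later (backpressure) */
#define APP_WRITE_ERROR     (-1)  /* hard failure, e.g. stream already FINed */

static int app_write(QUIC_SSTREAM *qss, const unsigned char *buf,
                     size_t buf_len, size_t *written)
{
    if (!ossl_quic_sstream_append(qss, buf, buf_len, written))
        return APP_WRITE_ERROR;
    if (*written == 0 && buf_len > 0)
        return APP_WRITE_WANT_WRITE;
    return APP_WRITE_OK;   /* note: *written may still be less than buf_len */
}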
/*
* Marks a stream as finished. ossl_quic_sstream_append() may not be called again
* after calling this function.
*/
void ossl_quic_sstream_fin(QUIC_SSTREAM *qss);
/*
* Resizes the internal ring buffer. All stream data is preserved safely.
*
* This can be used to expand or contract the ring buffer, but not to contract
* the ring buffer below the amount of stream data currently stored in it.
* Returns 1 on success and 0 on failure.
*
* IMPORTANT: Any buffers referenced by iovecs output by
* ossl_quic_sstream_get_stream_frame() cease to be valid after calling this function.
*/
int ossl_quic_sstream_set_buffer_size(QUIC_SSTREAM *qss, size_t num_bytes);
/*
* Gets the internal ring buffer size in bytes.
*/
size_t ossl_quic_sstream_get_buffer_size(QUIC_SSTREAM *qss);
/*
* Gets the number of bytes used in the internal ring buffer.
*/
size_t ossl_quic_sstream_get_buffer_used(QUIC_SSTREAM *qss);
/*
* Gets the number of bytes free in the internal ring buffer.
*/
size_t ossl_quic_sstream_get_buffer_avail(QUIC_SSTREAM *qss);
/*
* Utility function to ensure the length of an array of iovecs matches the
* length given as len. Trailing iovecs have their length values reduced or set
* to 0 as necessary.
*/
void ossl_quic_sstream_adjust_iov(size_t len,
OSSL_QTX_IOVEC *iov,
size_t num_iov);
#endif

View File

@ -0,0 +1,64 @@
/*
* Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#ifndef OSSL_UINT_SET_H
# define OSSL_UINT_SET_H
#include "openssl/params.h"
/*
* uint64_t Integer Sets
* =====================
*
* Utilities for managing a logical set of unsigned 64-bit integers. The
* structure tracks each contiguous range of integers using one allocation and
* is thus optimised for cases where integers tend to appear consecutively.
* Queries are optimised under the assumption that they will generally be made
* on integers near the end of the set.
*
* Discussion of implementation details can be found in uint_set.c.
*/
typedef struct uint_range_st {
uint64_t start, end;
} UINT_RANGE;
typedef struct uint_set_item_st {
struct uint_set_item_st *prev, *next;
UINT_RANGE range;
} UINT_SET_ITEM;
typedef struct uint_set_st {
UINT_SET_ITEM *head, *tail;
/* Number of ranges (not integers) in the set. */
size_t num_ranges;
} UINT_SET;
void ossl_uint_set_init(UINT_SET *s);
void ossl_uint_set_destroy(UINT_SET *s);
/*
* Insert a range into an integer set. Returns 0 on allocation failure, in which
* case the integer set is in a valid but undefined state. Otherwise, returns 1.
* Ranges can overlap existing ranges without limitation. If a range is a subset
* of an existing range in the set, this is a no-op and returns 1.
*/
int ossl_uint_set_insert(UINT_SET *s, const UINT_RANGE *range);
/*
* Remove a range from the set. Returns 0 on allocation failure, in which case
* the integer set is unchanged. Otherwise, returns 1. Ranges which are not
* already in the set can be removed without issue. If a passed range is not in
* the integer set at all, this is a no-op and returns 1.
*/
int ossl_uint_set_remove(UINT_SET *s, const UINT_RANGE *range);
/* Returns 1 iff the given integer is in the integer set. */
int ossl_uint_set_query(const UINT_SET *s, uint64_t v);
#endif
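
A brief usage sketch of the API above, showing insertion, automatic merging of an adjacent range, removal with splitting, and membership queries. Illustration only; the values are arbitrary.

#include "internal/uint_set.h"

static int uint_set_demo(void)
{
    UINT_SET s;
    UINT_RANGE r;
    int ok = 0;

    ossl_uint_set_init(&s);

    r.start = 10; r.end = 19;            /* set is now {[10, 19]} */
    if (!ossl_uint_set_insert(&s, &r))
        goto err;

    r.start = 20; r.end = 24;            /* adjacent, so merged: {[10, 24]} */
    if (!ossl_uint_set_insert(&s, &r))
        goto err;

    r.start = 12; r.end = 13;            /* removal splits: {[10, 11], [14, 24]} */
    if (!ossl_uint_set_remove(&s, &r))
        goto err;

    ok = ossl_uint_set_query(&s, 14)     /* 1: present */
        && !ossl_uint_set_query(&s, 12); /* 0: removed */
 err:
    ossl_uint_set_destroy(&s);
    return ok;
}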

View File

@ -1,3 +1,7 @@
$LIBSSL=../../libssl
SOURCE[$LIBSSL]=quic_method.c quic_impl.c quic_wire.c quic_ackm.c quic_statm.c cc_dummy.c quic_demux.c quic_record_rx.c quic_record_rx_wrap.c quic_record_tx.c quic_record_util.c quic_record_shared.c quic_wire_pkt.c quic_rx_depack.c quic_fc.c
SOURCE[$LIBSSL]=quic_method.c quic_impl.c quic_wire.c quic_ackm.c quic_statm.c
SOURCE[$LIBSSL]=cc_dummy.c quic_demux.c quic_record_rx.c
SOURCE[$LIBSSL]=quic_record_tx.c quic_record_util.c quic_record_shared.c quic_wire_pkt.c
SOURCE[$LIBSSL]=quic_record_rx_wrap.c quic_rx_depack.c
SOURCE[$LIBSSL]=quic_fc.c uint_set.c quic_stream.c

View File

@ -8,6 +8,7 @@
*/
#include "internal/quic_ackm.h"
#include "internal/uint_set.h"
#include "internal/common.h"
#include <assert.h>
@ -373,45 +374,14 @@ tx_pkt_history_remove(struct tx_pkt_history_st *h, uint64_t pkt_num)
* given PN until that PN becomes provably ACKed and we finally remove it from
* our set (by bumping the watermark) as no longer being our concern.
*
* The data structure supports the following operations:
* The data structure used is the UINT_SET structure defined in uint_set.h,
* which is used as a PN set. We use the following operations of the structure:
*
* Insert Range: Adds an inclusive range of packet numbers [start, end]
* to the set. Equivalent to Insert for each number
* in the range. (Used when we receive a new PN.)
* Insert Range: Used when we receive a new PN.
*
* Remove Range: Removes an inclusive range of packet numbers [start, end]
* from the set. Not all of the range need already be in
* the set, but any part of the range in the set is removed.
* (Used when bumping the watermark.)
* Remove Range: Used when bumping the watermark.
*
* Query: Is a PN in the data structure?
*
* The data structure can be iterated.
*
* For greater efficiency in tracking large numbers of contiguous PNs, we track
* PN ranges rather than individual PNs. The data structure manages a list of PN
* ranges [[start, end]...]. Internally this is implemented as a doubly linked
* sorted list of range structures, which are automatically split and merged as
* necessary.
*
* This data structure requires O(n) traversal of the list for insertion,
* removal and query when we are not adding/removing ranges which are near the
* beginning or end of the set of ranges. It is expected that the number of PN
* ranges needed at any given time will generally be small and that most
* operations will be close to the beginning or end of the range.
*
* Invariant: The data structure is always sorted in ascending order by PN.
*
* Invariant: No two adjacent ranges ever 'border' one another (have no
* numerical gap between them) as the data structure always ensures
* such ranges are merged.
*
* Invariant: No two ranges ever overlap.
*
* Invariant: No range [a, b] ever has a > b.
*
* Invariant: Since ranges are represented using inclusive bounds, no range
* item inside the data structure can represent a span of zero PNs.
* Query: Used to determine if a PN is in the set.
*
* **Possible duplicates.** A PN is considered a possible duplicate when either:
*
@ -435,344 +405,8 @@ tx_pkt_history_remove(struct tx_pkt_history_st *h, uint64_t pkt_num)
* used to update the state of the RX side of the ACK manager by bumping the
* watermark accordingly.
*/
struct pn_set_item_st {
struct pn_set_item_st *prev, *next;
OSSL_QUIC_ACK_RANGE range;
};
struct pn_set_st {
struct pn_set_item_st *head, *tail;
/* Number of ranges (not PNs) in the set. */
size_t num_ranges;
};
static void pn_set_init(struct pn_set_st *s)
{
s->head = s->tail = NULL;
s->num_ranges = 0;
}
static void pn_set_destroy(struct pn_set_st *s)
{
struct pn_set_item_st *x, *xnext;
for (x = s->head; x != NULL; x = xnext) {
xnext = x->next;
OPENSSL_free(x);
}
}
/* Possible merge of x, x->prev */
static void pn_set_merge_adjacent(struct pn_set_st *s, struct pn_set_item_st *x)
{
struct pn_set_item_st *xprev = x->prev;
if (xprev == NULL)
return;
if (x->range.start - 1 != xprev->range.end)
return;
x->range.start = xprev->range.start;
x->prev = xprev->prev;
if (x->prev != NULL)
x->prev->next = x;
if (s->head == xprev)
s->head = x;
OPENSSL_free(xprev);
--s->num_ranges;
}
/* Returns 1 if there exists a PN x which falls within both ranges a and b. */
static int pn_range_overlaps(const OSSL_QUIC_ACK_RANGE *a,
const OSSL_QUIC_ACK_RANGE *b)
{
return ossl_quic_pn_min(a->end, b->end)
>= ossl_quic_pn_max(a->start, b->start);
}
/*
* Insert a range into a PN set. Returns 0 on allocation failure, in which case
* the PN set is in a valid but undefined state. Otherwise, returns 1. Ranges
* can overlap existing ranges without limitation. If a range is a subset of
* an existing range in the set, this is a no-op and returns 1.
*/
static int pn_set_insert(struct pn_set_st *s, const OSSL_QUIC_ACK_RANGE *range)
{
struct pn_set_item_st *x, *z, *xnext, *f, *fnext;
QUIC_PN start = range->start, end = range->end;
if (!ossl_assert(start <= end))
return 0;
if (s->head == NULL) {
/* Nothing in the set yet, so just add this range. */
x = OPENSSL_zalloc(sizeof(struct pn_set_item_st));
if (x == NULL)
return 0;
x->range.start = start;
x->range.end = end;
s->head = s->tail = x;
++s->num_ranges;
return 1;
}
if (start > s->tail->range.end) {
/*
* Range is after the latest range in the set, so append.
*
* Note: The case where the range is before the earliest range in the
* set is handled as a degenerate case of the final case below. See
* optimization note (*) below.
*/
if (s->tail->range.end + 1 == start) {
s->tail->range.end = end;
return 1;
}
x = OPENSSL_zalloc(sizeof(struct pn_set_item_st));
if (x == NULL)
return 0;
x->range.start = start;
x->range.end = end;
x->prev = s->tail;
if (s->tail != NULL)
s->tail->next = x;
s->tail = x;
++s->num_ranges;
return 1;
}
if (start <= s->head->range.start && end >= s->tail->range.end) {
/*
* New range dwarfs all ranges in our set.
*
* Free everything except the first range in the set, which we scavenge
* and reuse.
*/
for (x = s->head->next; x != NULL; x = xnext) {
xnext = x->next;
OPENSSL_free(x);
}
s->head->range.start = start;
s->head->range.end = end;
s->head->next = s->head->prev = NULL;
s->tail = s->head;
s->num_ranges = 1;
return 1;
}
/*
* Walk backwards since we will most often be inserting at the end. As an
* optimization, test the head node first and skip iterating over the
* entire list if we are inserting at the start. The assumption is that
* insertion at the start and end of the space will be the most common
* operations. (*)
*/
z = end < s->head->range.start ? s->head : s->tail;
for (; z != NULL; z = z->prev) {
/* An existing range dwarfs our new range (optimisation). */
if (z->range.start <= start && z->range.end >= end)
return 1;
if (pn_range_overlaps(&z->range, range)) {
/*
* Our new range overlaps an existing range, or possibly several
* existing ranges.
*/
struct pn_set_item_st *ovend = z;
OSSL_QUIC_ACK_RANGE t;
size_t n = 0;
t.end = ossl_quic_pn_max(end, z->range.end);
/* Get earliest overlapping range. */
for (; z->prev != NULL && pn_range_overlaps(&z->prev->range, range);
z = z->prev);
t.start = ossl_quic_pn_min(start, z->range.start);
/* Replace sequence of nodes z..ovend with ovend only. */
ovend->range = t;
ovend->prev = z->prev;
if (z->prev != NULL)
z->prev->next = ovend;
if (s->head == z)
s->head = ovend;
/* Free now unused nodes. */
for (f = z; f != ovend; f = fnext, ++n) {
fnext = f->next;
OPENSSL_free(f);
}
s->num_ranges -= n;
break;
} else if (end < z->range.start
&& (z->prev == NULL || start > z->prev->range.end)) {
if (z->range.start == end + 1) {
/* We can extend the following range backwards. */
z->range.start = start;
/*
* If this closes a gap we now need to merge
* consecutive nodes.
*/
pn_set_merge_adjacent(s, z);
} else if (z->prev != NULL && z->prev->range.end + 1 == start) {
/* We can extend the preceding range forwards. */
z->prev->range.end = end;
/*
* If this closes a gap we now need to merge
* consecutive nodes.
*/
pn_set_merge_adjacent(s, z);
} else {
/*
* The new interval is between intervals without overlapping or
* touching them, so insert between, preserving sort.
*/
x = OPENSSL_zalloc(sizeof(struct pn_set_item_st));
if (x == NULL)
return 0;
x->range.start = start;
x->range.end = end;
x->next = z;
x->prev = z->prev;
if (x->prev != NULL)
x->prev->next = x;
z->prev = x;
if (s->head == z)
s->head = x;
++s->num_ranges;
}
break;
}
}
return 1;
}
/*
* Remove a range from the set. Returns 0 on allocation failure, in which case
* the PN set is unchanged. Otherwise, returns 1. Ranges which are not already
* in the set can be removed without issue. If a passed range is not in the PN
* set at all, this is a no-op and returns 1.
*/
static int pn_set_remove(struct pn_set_st *s, const OSSL_QUIC_ACK_RANGE *range)
{
struct pn_set_item_st *z, *zprev, *y;
QUIC_PN start = range->start, end = range->end;
if (!ossl_assert(start <= end))
return 0;
/* Walk backwards since we will most often be removing at the end. */
for (z = s->tail; z != NULL; z = zprev) {
zprev = z->prev;
if (start > z->range.end)
/* No overlapping ranges can exist beyond this point, so stop. */
break;
if (start <= z->range.start && end >= z->range.end) {
/*
* The range being removed dwarfs this range, so it should be
* removed.
*/
if (z->next != NULL)
z->next->prev = z->prev;
if (z->prev != NULL)
z->prev->next = z->next;
if (s->head == z)
s->head = z->next;
if (s->tail == z)
s->tail = z->prev;
OPENSSL_free(z);
--s->num_ranges;
} else if (start <= z->range.start) {
/*
* The range being removed includes start of this range, but does
* not cover the entire range (as this would be caught by the case
* above). Shorten the range.
*/
assert(end < z->range.end);
z->range.start = end + 1;
} else if (end >= z->range.end) {
/*
* The range being removed includes the end of this range, but does
* not cover the entire range (as this would be caught by the case
* above). Shorten the range. We can also stop iterating.
*/
assert(start > z->range.start);
assert(start > 0);
z->range.end = start - 1;
break;
} else if (start > z->range.start && end < z->range.end) {
/*
* The range being removed falls entirely in this range, so cut it
* into two. Cases where a zero-length range would be created are
* handled by the above cases.
*/
y = OPENSSL_zalloc(sizeof(struct pn_set_item_st));
if (y == NULL)
return 0;
y->range.end = z->range.end;
y->range.start = end + 1;
y->next = z->next;
y->prev = z;
if (y->next != NULL)
y->next->prev = y;
z->range.end = start - 1;
z->next = y;
if (s->tail == z)
s->tail = y;
++s->num_ranges;
break;
} else {
/* Assert no partial overlap; all cases should be covered above. */
assert(!pn_range_overlaps(&z->range, range));
}
}
return 1;
}
/* Returns 1 iff the given PN is in the PN set. */
static int pn_set_query(const struct pn_set_st *s, QUIC_PN pn)
{
struct pn_set_item_st *x;
if (s->head == NULL)
return 0;
for (x = s->tail; x != NULL; x = x->prev)
if (x->range.start <= pn && x->range.end >= pn)
return 1;
else if (x->range.end < pn)
return 0;
return 0;
}
struct rx_pkt_history_st {
struct pn_set_st set;
UINT_SET set;
/*
* Invariant: PNs below this are not in the set.
@ -786,13 +420,13 @@ static int rx_pkt_history_bump_watermark(struct rx_pkt_history_st *h,
static void rx_pkt_history_init(struct rx_pkt_history_st *h)
{
pn_set_init(&h->set);
ossl_uint_set_init(&h->set);
h->watermark = 0;
}
static void rx_pkt_history_destroy(struct rx_pkt_history_st *h)
{
pn_set_destroy(&h->set);
ossl_uint_set_destroy(&h->set);
}
/*
@ -806,12 +440,12 @@ static void rx_pkt_history_trim_range_count(struct rx_pkt_history_st *h)
QUIC_PN highest = QUIC_PN_INVALID;
while (h->set.num_ranges > MAX_RX_ACK_RANGES) {
OSSL_QUIC_ACK_RANGE r = h->set.head->range;
UINT_RANGE r = h->set.head->range;
highest = (highest == QUIC_PN_INVALID)
? r.end : ossl_quic_pn_max(highest, r.end);
pn_set_remove(&h->set, &r);
ossl_uint_set_remove(&h->set, &r);
}
/*
@ -825,7 +459,7 @@ static void rx_pkt_history_trim_range_count(struct rx_pkt_history_st *h)
static int rx_pkt_history_add_pn(struct rx_pkt_history_st *h,
QUIC_PN pn)
{
OSSL_QUIC_ACK_RANGE r;
UINT_RANGE r;
r.start = pn;
r.end = pn;
@ -833,7 +467,7 @@ static int rx_pkt_history_add_pn(struct rx_pkt_history_st *h,
if (pn < h->watermark)
return 1; /* consider this a success case */
if (pn_set_insert(&h->set, &r) != 1)
if (ossl_uint_set_insert(&h->set, &r) != 1)
return 0;
rx_pkt_history_trim_range_count(h);
@ -843,7 +477,7 @@ static int rx_pkt_history_add_pn(struct rx_pkt_history_st *h,
static int rx_pkt_history_bump_watermark(struct rx_pkt_history_st *h,
QUIC_PN watermark)
{
OSSL_QUIC_ACK_RANGE r;
UINT_RANGE r;
if (watermark <= h->watermark)
return 1;
@ -851,7 +485,7 @@ static int rx_pkt_history_bump_watermark(struct rx_pkt_history_st *h,
/* Remove existing PNs below the watermark. */
r.start = 0;
r.end = watermark - 1;
if (pn_set_remove(&h->set, &r) != 1)
if (ossl_uint_set_remove(&h->set, &r) != 1)
return 0;
h->watermark = watermark;
@ -1936,7 +1570,7 @@ static void ackm_fill_rx_ack_ranges(OSSL_ACKM *ackm, int pkt_space,
OSSL_QUIC_FRAME_ACK *ack)
{
struct rx_pkt_history_st *h = get_rx_history(ackm, pkt_space);
struct pn_set_item_st *x;
UINT_SET_ITEM *x;
size_t i = 0;
/*
@ -1945,8 +1579,10 @@ static void ackm_fill_rx_ack_ranges(OSSL_ACKM *ackm, int pkt_space,
*/
for (x = h->set.tail;
x != NULL && i < OSSL_NELEM(ackm->ack_ranges);
x = x->prev, ++i)
ackm->ack_ranges[pkt_space][i] = x->range;
x = x->prev, ++i) {
ackm->ack_ranges[pkt_space][i].start = x->range.start;
ackm->ack_ranges[pkt_space][i].end = x->range.end;
}
ack->ack_ranges = ackm->ack_ranges[pkt_space];
ack->num_ack_ranges = i;
@ -1995,7 +1631,7 @@ int ossl_ackm_is_rx_pn_processable(OSSL_ACKM *ackm, QUIC_PN pn, int pkt_space)
{
struct rx_pkt_history_st *h = get_rx_history(ackm, pkt_space);
return pn >= h->watermark && pn_set_query(&h->set, pn) == 0;
return pn >= h->watermark && ossl_uint_set_query(&h->set, pn) == 0;
}
void ossl_ackm_set_loss_detection_deadline_callback(OSSL_ACKM *ackm,

View File

@ -1,3 +1,12 @@
/*
* Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include "internal/quic_demux.h"
#include "internal/quic_wire_pkt.h"
#include "internal/common.h"

537
ssl/quic/quic_stream.c Normal file
View File

@ -0,0 +1,537 @@
/*
* Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include "internal/quic_stream.h"
#include "internal/uint_set.h"
#include "internal/common.h"
/*
* ==================================================================
* Byte-wise ring buffer which supports pushing and popping blocks of multiple
* bytes at a time. The logical offset of each byte for the purposes of a QUIC
* stream is tracked. Bytes can be popped from the ring buffer in two stages;
* first they are popped, and then they are culled. Bytes which have been popped
* but not yet culled will not be overwritten, and can be restored.
*/
struct ring_buf {
void *start;
size_t alloc; /* size of buffer allocation in bytes */
/*
* Logical offset of the head (where we append to). This is the current size
* of the QUIC stream. This increases monotonically.
*/
uint64_t head_offset;
/*
* Logical offset of the cull tail. Data is no longer needed and is
* deallocated as the cull tail advances, which occurs as data is
* acknowledged. This increases monotonically.
*/
uint64_t ctail_offset;
};
static int ring_buf_init(struct ring_buf *r)
{
r->start = NULL;
r->alloc = 0;
r->head_offset = r->ctail_offset = 0;
return 1;
}
static void ring_buf_destroy(struct ring_buf *r)
{
OPENSSL_free(r->start);
r->start = NULL;
r->alloc = 0;
}
static size_t ring_buf_used(struct ring_buf *r)
{
return r->head_offset - r->ctail_offset;
}
static size_t ring_buf_avail(struct ring_buf *r)
{
return r->alloc - ring_buf_used(r);
}
static size_t ring_buf_push(struct ring_buf *r,
const unsigned char *buf, size_t buf_len)
{
size_t pushed = 0, avail, idx, l, i;
unsigned char *start = r->start;
for (i = 0;; ++i) {
avail = ring_buf_avail(r);
if (buf_len > avail)
buf_len = avail;
if (buf_len == 0)
break;
assert(i < 2);
idx = r->head_offset % r->alloc;
l = r->alloc - idx;
if (buf_len < l)
l = buf_len;
memcpy(start + idx, buf, l);
r->head_offset += l;
buf += l;
buf_len -= l;
pushed += l;
}
return pushed;
}
/*
* Retrieves data out of the read side of the ring buffer starting at the given
* logical offset. *buf is set to point to a contiguous span of bytes and
* *buf_len is set to the number of contiguous bytes. After this function
* returns, there may or may not be more bytes available at the logical offset
* of (logical_offset + *buf_len) by calling this function again. If the logical
* offset is out of the range retained by the ring buffer, returns 0, else
* returns 1. A logical offset at the end of the range retained by the ring
* buffer is not considered an error and is returned with a *buf_len of 0.
*
* The ring buffer state is not changed.
*/
static int ring_buf_get_buf_at(const struct ring_buf *r,
uint64_t logical_offset,
const unsigned char **buf, size_t *buf_len)
{
const unsigned char *start = r->start;
size_t idx, l;
if (logical_offset > r->head_offset || logical_offset < r->ctail_offset)
return 0;
if (r->alloc == 0) {
*buf = NULL;
*buf_len = 0;
return 1;
}
idx = logical_offset % r->alloc;
l = r->head_offset - logical_offset;
if (l > r->alloc - idx)
l = r->alloc - idx;
*buf = start + idx;
*buf_len = l;
return 1;
}
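
Because a logically contiguous span of stream data can wrap around the end of the allocation, at most two calls to ring_buf_get_buf_at() are needed to walk it. A hypothetical helper that copies such a span out might look as follows (illustration only; assumes <string.h> is available for memcpy).

static size_t ring_buf_copy_out(const struct ring_buf *r,
                                uint64_t logical_offset,
                                unsigned char *buf, size_t buf_len)
{
    size_t copied = 0, i, src_len = 0;
    const unsigned char *src = NULL;

    for (i = 0; i < 2 && copied < buf_len; ++i) {
        if (!ring_buf_get_buf_at(r, logical_offset + copied, &src, &src_len)
            || src_len == 0)
            break;
        if (src_len > buf_len - copied)
            src_len = buf_len - copied;
        memcpy(buf + copied, src, src_len);  /* second pass covers the wrap */
        copied += src_len;
    }
    return copied;
}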
static void ring_buf_cpop_range(struct ring_buf *r,
uint64_t start, uint64_t end)
{
assert(end >= start);
if (start > r->ctail_offset)
return;
r->ctail_offset = end + 1;
}
static int ring_buf_resize(struct ring_buf *r, size_t num_bytes)
{
struct ring_buf rnew = {0};
const unsigned char *src = NULL;
size_t src_len = 0, copied = 0;
if (num_bytes == r->alloc)
return 1;
if (num_bytes < ring_buf_used(r))
return 0;
rnew.start = OPENSSL_malloc(num_bytes);
if (rnew.start == NULL)
return 0;
rnew.alloc = num_bytes;
rnew.head_offset = r->head_offset - ring_buf_used(r);
rnew.ctail_offset = rnew.head_offset;
for (;;) {
if (!ring_buf_get_buf_at(r, r->ctail_offset + copied, &src, &src_len)) {
OPENSSL_free(rnew.start);
return 0;
}
if (src_len == 0)
break;
if (ring_buf_push(&rnew, src, src_len) != src_len) {
OPENSSL_free(rnew.start);
return 0;
}
copied += src_len;
}
assert(rnew.head_offset == r->head_offset);
rnew.ctail_offset = r->ctail_offset;
OPENSSL_free(r->start);
memcpy(r, &rnew, sizeof(*r));
return 1;
}
/*
* ==================================================================
* QUIC Send Stream
*/
struct quic_sstream_st {
struct ring_buf ring_buf;
/*
* Any logical byte in the stream is in one of these states:
*
* - NEW: The byte has not yet been transmitted, or has been lost and is
* in need of retransmission.
*
* - IN_FLIGHT: The byte has been transmitted but is awaiting
* acknowledgement. We continue to store the data in case we return
* to the NEW state.
*
* - ACKED: The byte has been acknowledged and we can cease storing it.
* We do not necessarily cull it immediately, so there may be a delay
* between reaching the ACKED state and the buffer space actually being
* recycled.
*
* A logical byte in the stream is:
*
* - in the NEW state if it is in new_set;
* - in the ACKED state if it is in acked_set
* (and may or may not have been culled);
* - in the IN_FLIGHT state otherwise.
*
* Invariant: No logical byte is ever in both new_set and acked_set.
*/
UINT_SET new_set, acked_set;
/*
* The current size of the stream is ring_buf.head_offset. If
* have_final_size is true, this is also the final size of the stream.
*/
unsigned int have_final_size : 1;
unsigned int sent_final_size : 1;
unsigned int acked_final_size : 1;
};
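
A compact walkthrough of how a single logical byte moves through the states described above as the public API is called. Illustration only; return values are ignored for brevity.

static void qss_state_walkthrough(void)
{
    unsigned char b = 0x42;
    size_t consumed = 0;
    QUIC_SSTREAM *qss = ossl_quic_sstream_new(4096);

    if (qss == NULL)
        return;
    ossl_quic_sstream_append(qss, &b, 1, &consumed);  /* byte 0 -> NEW (new_set)   */
    ossl_quic_sstream_mark_transmitted(qss, 0, 0);    /* byte 0 -> IN_FLIGHT       */
    ossl_quic_sstream_mark_lost(qss, 0, 0);           /* byte 0 -> NEW again       */
    ossl_quic_sstream_mark_transmitted(qss, 0, 0);    /* byte 0 -> IN_FLIGHT       */
    ossl_quic_sstream_mark_acked(qss, 0, 0);          /* byte 0 -> ACKED, cullable */
    ossl_quic_sstream_free(qss);
}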
static void qss_cull(QUIC_SSTREAM *qss);
QUIC_SSTREAM *ossl_quic_sstream_new(size_t init_buf_size)
{
QUIC_SSTREAM *qss;
qss = OPENSSL_zalloc(sizeof(QUIC_SSTREAM));
if (qss == NULL)
return NULL;
ring_buf_init(&qss->ring_buf);
if (!ring_buf_resize(&qss->ring_buf, init_buf_size)) {
ring_buf_destroy(&qss->ring_buf);
OPENSSL_free(qss);
return NULL;
}
ossl_uint_set_init(&qss->new_set);
ossl_uint_set_init(&qss->acked_set);
return qss;
}
void ossl_quic_sstream_free(QUIC_SSTREAM *qss)
{
if (qss == NULL)
return;
ossl_uint_set_destroy(&qss->new_set);
ossl_uint_set_destroy(&qss->acked_set);
ring_buf_destroy(&qss->ring_buf);
OPENSSL_free(qss);
}
int ossl_quic_sstream_get_stream_frame(QUIC_SSTREAM *qss,
size_t skip,
OSSL_QUIC_FRAME_STREAM *hdr,
OSSL_QTX_IOVEC *iov,
size_t *num_iov)
{
size_t num_iov_ = 0, src_len = 0, total_len = 0, i;
uint64_t max_len;
const unsigned char *src = NULL;
UINT_SET_ITEM *range = qss->new_set.head;
if (*num_iov < 2)
return 0;
for (i = 0; i < skip && range != NULL; ++i)
range = range->next;
if (range == NULL) {
/* No new bytes to send, but we might have a FIN */
if (!qss->have_final_size || qss->sent_final_size)
return 0;
hdr->offset = qss->ring_buf.head_offset;
hdr->len = 0;
hdr->is_fin = 1;
*num_iov = 0;
return 1;
}
/*
* We can only send a contiguous range of logical bytes in a single
* stream frame, so limit ourselves to the range of the first set entry.
*
* Set entries never have 'adjacent' entries so we don't have to worry
* about them here.
*/
max_len = range->range.end - range->range.start + 1;
for (i = 0;; ++i) {
if (total_len >= max_len)
break;
if (!ring_buf_get_buf_at(&qss->ring_buf,
range->range.start + total_len,
&src, &src_len))
return 0;
if (src_len == 0)
break;
assert(i < 2);
if (total_len + src_len > max_len)
src_len = max_len - total_len;
iov[num_iov_].buf = src;
iov[num_iov_].buf_len = src_len;
total_len += src_len;
++num_iov_;
}
hdr->offset = range->range.start;
hdr->len = total_len;
hdr->is_fin = qss->have_final_size
&& hdr->offset + hdr->len == qss->ring_buf.head_offset;
*num_iov = num_iov_;
return 1;
}
int ossl_quic_sstream_mark_transmitted(QUIC_SSTREAM *qss,
uint64_t start,
uint64_t end)
{
UINT_RANGE r;
r.start = start;
r.end = end;
if (!ossl_uint_set_remove(&qss->new_set, &r))
return 0;
return 1;
}
int ossl_quic_sstream_mark_transmitted_fin(QUIC_SSTREAM *qss,
uint64_t final_size)
{
/*
* We do not really need final_size since we already know the size of the
* stream, but this serves as a sanity check.
*/
if (!qss->have_final_size || final_size != qss->ring_buf.head_offset)
return 0;
qss->sent_final_size = 1;
return 1;
}
int ossl_quic_sstream_mark_lost(QUIC_SSTREAM *qss,
uint64_t start,
uint64_t end)
{
UINT_RANGE r;
r.start = start;
r.end = end;
/*
* We lost a range of stream data bytes, so reinsert them into the new set,
* so that they are returned once more by ossl_quic_sstream_get_stream_frame.
*/
if (!ossl_uint_set_insert(&qss->new_set, &r))
return 0;
return 1;
}
int ossl_quic_sstream_mark_lost_fin(QUIC_SSTREAM *qss)
{
if (qss->acked_final_size)
/* Does not make sense to lose a FIN after it has been ACKed */
return 0;
/* FIN was lost, so we need to transmit it again. */
qss->sent_final_size = 0;
return 1;
}
int ossl_quic_sstream_mark_acked(QUIC_SSTREAM *qss,
uint64_t start,
uint64_t end)
{
UINT_RANGE r;
r.start = start;
r.end = end;
if (!ossl_uint_set_insert(&qss->acked_set, &r))
return 0;
qss_cull(qss);
return 1;
}
int ossl_quic_sstream_mark_acked_fin(QUIC_SSTREAM *qss)
{
if (!qss->have_final_size)
/* Cannot ack final size before we have a final size */
return 0;
qss->acked_final_size = 1;
return 1;
}
void ossl_quic_sstream_fin(QUIC_SSTREAM *qss)
{
if (qss->have_final_size)
return;
qss->have_final_size = 1;
}
int ossl_quic_sstream_append(QUIC_SSTREAM *qss,
const unsigned char *buf,
size_t buf_len,
size_t *consumed)
{
size_t l, consumed_ = 0;
UINT_RANGE r;
struct ring_buf old_ring_buf = qss->ring_buf;
if (qss->have_final_size) {
*consumed = 0;
return 0;
}
/*
* Note: It is assumed that ossl_quic_sstream_append will be called during a
* call to e.g. SSL_write and this function is therefore designed to support
* such semantics. In particular, the buffer pointed to by buf is only
* assumed to be valid for the duration of this call, therefore we must copy
* the data here. We will later copy-and-encrypt the data during packet
* encryption, so this is a two-copy design. Supporting a one-copy design in
* the future will require applications to use a different kind of API.
* Supporting such changes in future will require corresponding enhancements
* to this code.
*/
while (buf_len > 0) {
l = ring_buf_push(&qss->ring_buf, buf, buf_len);
if (l == 0)
break;
buf += l;
buf_len -= l;
consumed_ += l;
}
if (consumed_ > 0) {
r.start = old_ring_buf.head_offset;
r.end = r.start + consumed_ - 1;
assert(r.end + 1 == qss->ring_buf.head_offset);
if (!ossl_uint_set_insert(&qss->new_set, &r)) {
qss->ring_buf = old_ring_buf;
*consumed = 0;
return 0;
}
}
*consumed = consumed_;
return 1;
}
static void qss_cull(QUIC_SSTREAM *qss)
{
/*
* Potentially cull data from our ring buffer. This can happen once data has
* been ACKed and we know we are never going to have to transmit it again.
*
* Since we use a ring buffer design for simplicity, we cannot cull byte n +
* k (for k > 0) from the ring buffer until byte n has also been culled.
* This means if parts of the stream get acknowledged out of order we might
* keep around some data we technically don't need to for a while. The
* impact of this is likely to be small and limited to quite a short
* duration, and doesn't justify the use of a more complex design.
*/
/*
* We only need to check the first range entry in the integer set because we
* can only cull contiguous areas at the start of the ring buffer anyway.
*/
if (qss->acked_set.head != NULL)
ring_buf_cpop_range(&qss->ring_buf,
qss->acked_set.head->range.start,
qss->acked_set.head->range.end);
}
int ossl_quic_sstream_set_buffer_size(QUIC_SSTREAM *qss, size_t num_bytes)
{
return ring_buf_resize(&qss->ring_buf, num_bytes);
}
size_t ossl_quic_sstream_get_buffer_size(QUIC_SSTREAM *qss)
{
return qss->ring_buf.alloc;
}
size_t ossl_quic_sstream_get_buffer_used(QUIC_SSTREAM *qss)
{
return ring_buf_used(&qss->ring_buf);
}
size_t ossl_quic_sstream_get_buffer_avail(QUIC_SSTREAM *qss)
{
return ring_buf_avail(&qss->ring_buf);
}
void ossl_quic_sstream_adjust_iov(size_t len,
OSSL_QTX_IOVEC *iov,
size_t num_iov)
{
size_t running = 0, i, iovlen;
for (i = 0, running = 0; i < num_iov; ++i) {
iovlen = iov[i].buf_len;
if (running >= len)
iov[i].buf_len = 0;
else if (running + iovlen > len)
iov[i].buf_len = len - running;
running += iovlen;
}
}
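
A concrete example of the trimming behaviour: if a frame was offered as two IOVs of 10 and 6 bytes (hdr.len == 16) and the caller decides to send only 12 bytes, the first IOV is left at 10 bytes and the second is trimmed to 2 (illustration only).

static void adjust_iov_example(void)
{
    unsigned char a[10], b[6];
    OSSL_QTX_IOVEC iov[2];

    iov[0].buf = a; iov[0].buf_len = sizeof(a);
    iov[1].buf = b; iov[1].buf_len = sizeof(b);

    ossl_quic_sstream_adjust_iov(12, iov, 2);
    /* Now iov[0].buf_len == 10 and iov[1].buf_len == 2. */
}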

382
ssl/quic/uint_set.c Normal file
View File

@ -0,0 +1,382 @@
/*
* Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include "internal/uint_set.h"
#include "internal/common.h"
#include <assert.h>
/*
* uint64_t Integer Sets
* =====================
*
* This data structure supports the following operations:
*
* Insert Range: Adds an inclusive range of integers [start, end]
* to the set. Equivalent to Insert for each number
* in the range.
*
* Remove Range: Removes an inclusive range of integers [start, end]
* from the set. Not all of the range need already be in
* the set, but any part of the range in the set is removed.
*
* Query: Is an integer in the data structure?
*
* The data structure can be iterated.
*
* For greater efficiency in tracking large numbers of contiguous integers, we
* track integer ranges rather than individual integers. The data structure
* manages a list of integer ranges [[start, end]...]. Internally this is
* implemented as a doubly linked sorted list of range structures, which are
* automatically split and merged as necessary.
*
* This data structure requires O(n) traversal of the list for insertion,
* removal and query when we are not adding/removing ranges which are near the
* beginning or end of the set of ranges. For the applications for which this
* data structure is used (e.g. QUIC PN tracking for ACK generation), it is
* expected that the number of integer ranges needed at any given time will
* generally be small and that most operations will be close to the beginning or
* end of the range.
*
* Invariant: The data structure is always sorted in ascending order by value.
*
* Invariant: No two adjacent ranges ever 'border' one another (have no
* numerical gap between them) as the data structure always ensures
* such ranges are merged.
*
* Invariant: No two ranges ever overlap.
*
* Invariant: No range [a, b] ever has a > b.
*
* Invariant: Since ranges are represented using inclusive bounds, no range
* item inside the data structure can represent a span of zero
* integers.
*/
void ossl_uint_set_init(UINT_SET *s)
{
s->head = s->tail = NULL;
s->num_ranges = 0;
}
void ossl_uint_set_destroy(UINT_SET *s)
{
UINT_SET_ITEM *x, *xnext;
for (x = s->head; x != NULL; x = xnext) {
xnext = x->next;
OPENSSL_free(x);
}
}
/* Possible merge of x, x->prev */
static void uint_set_merge_adjacent(UINT_SET *s, UINT_SET_ITEM *x)
{
UINT_SET_ITEM *xprev = x->prev;
if (xprev == NULL)
return;
if (x->range.start - 1 != xprev->range.end)
return;
x->range.start = xprev->range.start;
x->prev = xprev->prev;
if (x->prev != NULL)
x->prev->next = x;
if (s->head == xprev)
s->head = x;
OPENSSL_free(xprev);
--s->num_ranges;
}
static uint64_t u64_min(uint64_t x, uint64_t y)
{
return x < y ? x : y;
}
static uint64_t u64_max(uint64_t x, uint64_t y)
{
return x > y ? x : y;
}
/*
* Returns 1 if there exists an integer x which falls within both ranges a and
* b.
*/
static int uint_range_overlaps(const UINT_RANGE *a,
const UINT_RANGE *b)
{
return u64_min(a->end, b->end)
>= u64_max(a->start, b->start);
}
int ossl_uint_set_insert(UINT_SET *s, const UINT_RANGE *range)
{
UINT_SET_ITEM *x, *z, *xnext, *f, *fnext;
uint64_t start = range->start, end = range->end;
if (!ossl_assert(start <= end))
return 0;
if (s->head == NULL) {
/* Nothing in the set yet, so just add this range. */
x = OPENSSL_zalloc(sizeof(UINT_SET_ITEM));
if (x == NULL)
return 0;
x->range.start = start;
x->range.end = end;
s->head = s->tail = x;
++s->num_ranges;
return 1;
}
if (start > s->tail->range.end) {
/*
* Range is after the latest range in the set, so append.
*
* Note: The case where the range is before the earliest range in the
* set is handled as a degenerate case of the final case below. See
* optimization note (*) below.
*/
if (s->tail->range.end + 1 == start) {
s->tail->range.end = end;
return 1;
}
x = OPENSSL_zalloc(sizeof(UINT_SET_ITEM));
if (x == NULL)
return 0;
x->range.start = start;
x->range.end = end;
x->prev = s->tail;
if (s->tail != NULL)
s->tail->next = x;
s->tail = x;
++s->num_ranges;
return 1;
}
if (start <= s->head->range.start && end >= s->tail->range.end) {
/*
* New range dwarfs all ranges in our set.
*
* Free everything except the first range in the set, which we scavenge
* and reuse.
*/
for (x = s->head->next; x != NULL; x = xnext) {
xnext = x->next;
OPENSSL_free(x);
}
s->head->range.start = start;
s->head->range.end = end;
s->head->next = s->head->prev = NULL;
s->tail = s->head;
s->num_ranges = 1;
return 1;
}
/*
* Walk backwards since we will most often be inserting at the end. As an
* optimization, test the head node first and skip iterating over the
* entire list if we are inserting at the start. The assumption is that
* insertion at the start and end of the space will be the most common
* operations. (*)
*/
z = end < s->head->range.start ? s->head : s->tail;
for (; z != NULL; z = z->prev) {
/* An existing range dwarfs our new range (optimisation). */
if (z->range.start <= start && z->range.end >= end)
return 1;
if (uint_range_overlaps(&z->range, range)) {
/*
* Our new range overlaps an existing range, or possibly several
* existing ranges.
*/
UINT_SET_ITEM *ovend = z;
UINT_RANGE t;
size_t n = 0;
t.end = u64_max(end, z->range.end);
/* Get earliest overlapping range. */
for (; z->prev != NULL && uint_range_overlaps(&z->prev->range, range);
z = z->prev);
t.start = u64_min(start, z->range.start);
/* Replace sequence of nodes z..ovend with ovend only. */
ovend->range = t;
ovend->prev = z->prev;
if (z->prev != NULL)
z->prev->next = ovend;
if (s->head == z)
s->head = ovend;
/* Free now unused nodes. */
for (f = z; f != ovend; f = fnext, ++n) {
fnext = f->next;
OPENSSL_free(f);
}
s->num_ranges -= n;
break;
} else if (end < z->range.start
&& (z->prev == NULL || start > z->prev->range.end)) {
if (z->range.start == end + 1) {
/* We can extend the following range backwards. */
z->range.start = start;
/*
* If this closes a gap we now need to merge
* consecutive nodes.
*/
uint_set_merge_adjacent(s, z);
} else if (z->prev != NULL && z->prev->range.end + 1 == start) {
/* We can extend the preceding range forwards. */
z->prev->range.end = end;
/*
* If this closes a gap we now need to merge
* consecutive nodes.
*/
uint_set_merge_adjacent(s, z);
} else {
/*
* The new interval is between intervals without overlapping or
* touching them, so insert between, preserving sort.
*/
x = OPENSSL_zalloc(sizeof(UINT_SET_ITEM));
if (x == NULL)
return 0;
x->range.start = start;
x->range.end = end;
x->next = z;
x->prev = z->prev;
if (x->prev != NULL)
x->prev->next = x;
z->prev = x;
if (s->head == z)
s->head = x;
++s->num_ranges;
}
break;
}
}
return 1;
}
int ossl_uint_set_remove(UINT_SET *s, const UINT_RANGE *range)
{
UINT_SET_ITEM *z, *zprev, *y;
uint64_t start = range->start, end = range->end;
if (!ossl_assert(start <= end))
return 0;
/* Walk backwards since we will most often be removing at the end. */
for (z = s->tail; z != NULL; z = zprev) {
zprev = z->prev;
if (start > z->range.end)
/* No overlapping ranges can exist beyond this point, so stop. */
break;
if (start <= z->range.start && end >= z->range.end) {
/*
* The range being removed dwarfs this range, so it should be
* removed.
*/
if (z->next != NULL)
z->next->prev = z->prev;
if (z->prev != NULL)
z->prev->next = z->next;
if (s->head == z)
s->head = z->next;
if (s->tail == z)
s->tail = z->prev;
OPENSSL_free(z);
--s->num_ranges;
} else if (start <= z->range.start) {
/*
* The range being removed includes start of this range, but does
* not cover the entire range (as this would be caught by the case
* above). Shorten the range.
*/
assert(end < z->range.end);
z->range.start = end + 1;
} else if (end >= z->range.end) {
/*
* The range being removed includes the end of this range, but does
* not cover the entire range (as this would be caught by the case
* above). Shorten the range. We can also stop iterating.
*/
assert(start > z->range.start);
assert(start > 0);
z->range.end = start - 1;
break;
} else if (start > z->range.start && end < z->range.end) {
/*
* The range being removed falls entirely in this range, so cut it
* into two. Cases where a zero-length range would be created are
* handled by the above cases.
*/
y = OPENSSL_zalloc(sizeof(UINT_SET_ITEM));
if (y == NULL)
return 0;
y->range.end = z->range.end;
y->range.start = end + 1;
y->next = z->next;
y->prev = z;
if (y->next != NULL)
y->next->prev = y;
z->range.end = start - 1;
z->next = y;
if (s->tail == z)
s->tail = y;
++s->num_ranges;
break;
} else {
/* Assert no partial overlap; all cases should be covered above. */
assert(!uint_range_overlaps(&z->range, range));
}
}
return 1;
}
int ossl_uint_set_query(const UINT_SET *s, uint64_t v)
{
UINT_SET_ITEM *x;
if (s->head == NULL)
return 0;
for (x = s->tail; x != NULL; x = x->prev)
if (x->range.start <= v && x->range.end >= v)
return 1;
else if (x->range.end < v)
return 0;
return 0;
}

View File

@ -291,6 +291,10 @@ IF[{- !$disabled{tests} -}]
INCLUDE[quic_fc_test]=../include ../apps/include
DEPEND[quic_fc_test]=../libcrypto.a ../libssl.a libtestutil.a
SOURCE[quic_stream_test]=quic_stream_test.c
INCLUDE[quic_stream_test]=../include ../apps/include
DEPEND[quic_stream_test]=../libcrypto.a ../libssl.a libtestutil.a
SOURCE[asynctest]=asynctest.c
INCLUDE[asynctest]=../include ../apps/include
DEPEND[asynctest]=../libcrypto
@ -1000,7 +1004,7 @@ ENDIF
ENDIF
IF[{- !$disabled{'quic'} -}]
PROGRAMS{noinst}=quicapitest quic_wire_test quic_ackm_test quic_record_test quic_fc_test
PROGRAMS{noinst}=quicapitest quic_wire_test quic_ackm_test quic_record_test quic_fc_test quic_stream_test
ENDIF
SOURCE[quicapitest]=quicapitest.c helpers/ssltestlib.c

330
test/quic_stream_test.c Normal file
View File

@ -0,0 +1,330 @@
/*
* Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
*
* Licensed under the Apache License 2.0 (the "License"). You may not use
* this file except in compliance with the License. You can obtain a copy
* in the file LICENSE in the source distribution or at
* https://www.openssl.org/source/license.html
*/
#include "internal/packet.h"
#include "internal/quic_stream.h"
#include "testutil.h"
static int compare_iov(const unsigned char *ref, size_t ref_len,
const OSSL_QTX_IOVEC *iov, size_t iov_len)
{
size_t i, total_len = 0;
const unsigned char *cur = ref;
for (i = 0; i < iov_len; ++i)
total_len += iov[i].buf_len;
if (ref_len != total_len) {
fprintf(stderr, "# expected %zu == %zu\n", ref_len, total_len);
return 0;
}
for (i = 0; i < iov_len; ++i) {
if (memcmp(cur, iov[i].buf, iov[i].buf_len))
return 0;
cur += iov[i].buf_len;
}
return 1;
}
static const unsigned char data_1[] = {
0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f
};
static int test_simple(void)
{
int testresult = 0;
QUIC_SSTREAM *sstream = NULL;
OSSL_QUIC_FRAME_STREAM hdr;
OSSL_QTX_IOVEC iov[2];
size_t num_iov = 0, wr = 0, i, init_size = 8192;
if (!TEST_ptr(sstream = ossl_quic_sstream_new(init_size)))
goto err;
/* Should not have any data yet */
num_iov = OSSL_NELEM(iov);
if (!TEST_false(ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov,
&num_iov)))
goto err;
/* Append data */
if (!TEST_true(ossl_quic_sstream_append(sstream, data_1, sizeof(data_1),
&wr))
|| !TEST_size_t_eq(wr, sizeof(data_1)))
goto err;
/* Read data */
num_iov = OSSL_NELEM(iov);
if (!TEST_true(ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov,
&num_iov))
|| !TEST_size_t_gt(num_iov, 0)
|| !TEST_uint64_t_eq(hdr.offset, 0)
|| !TEST_uint64_t_eq(hdr.len, sizeof(data_1))
|| !TEST_false(hdr.is_fin))
goto err;
if (!TEST_true(compare_iov(data_1, sizeof(data_1), iov, num_iov)))
goto err;
/* Mark data as half transmitted */
if (!TEST_true(ossl_quic_sstream_mark_transmitted(sstream, 0, 7)))
goto err;
/* Read data */
num_iov = OSSL_NELEM(iov);
if (!TEST_true(ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov,
&num_iov))
|| !TEST_size_t_gt(num_iov, 0)
|| !TEST_uint64_t_eq(hdr.offset, 8)
|| !TEST_uint64_t_eq(hdr.len, sizeof(data_1) - 8)
|| !TEST_false(hdr.is_fin))
goto err;
if (!TEST_true(compare_iov(data_1 + 8, sizeof(data_1) - 8, iov, num_iov)))
goto err;
if (!TEST_true(ossl_quic_sstream_mark_transmitted(sstream, 8, 15)))
goto err;
/* Read more data; should not be any more */
num_iov = OSSL_NELEM(iov);
if (!TEST_false(ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov,
&num_iov)))
goto err;
/* Now we have lost bytes 4-6 */
if (!TEST_true(ossl_quic_sstream_mark_lost(sstream, 4, 6)))
goto err;
/* Should be able to read them */
num_iov = OSSL_NELEM(iov);
if (!TEST_true(ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov,
&num_iov))
|| !TEST_size_t_gt(num_iov, 0)
|| !TEST_uint64_t_eq(hdr.offset, 4)
|| !TEST_uint64_t_eq(hdr.len, 3)
|| !TEST_false(hdr.is_fin))
goto err;
if (!TEST_true(compare_iov(data_1 + 4, 3, iov, num_iov)))
goto err;
/* Retransmit */
if (!TEST_true(ossl_quic_sstream_mark_transmitted(sstream, 4, 6)))
goto err;
/* Read more data; should not be any more */
num_iov = OSSL_NELEM(iov);
if (!TEST_false(ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov,
&num_iov)))
goto err;
if (!TEST_size_t_eq(ossl_quic_sstream_get_buffer_used(sstream), 16))
goto err;
/* Data has been acknowledged, but space should not be freed yet */
if (!TEST_true(ossl_quic_sstream_mark_acked(sstream, 1, 7))
|| !TEST_size_t_eq(ossl_quic_sstream_get_buffer_used(sstream), 16))
goto err;
/* Now data should be freed */
if (!TEST_true(ossl_quic_sstream_mark_acked(sstream, 0, 0))
|| !TEST_size_t_eq(ossl_quic_sstream_get_buffer_used(sstream), 8))
goto err;
if (!TEST_true(ossl_quic_sstream_mark_acked(sstream, 0, 15))
|| !TEST_size_t_eq(ossl_quic_sstream_get_buffer_used(sstream), 0))
goto err;
/* Now FIN */
ossl_quic_sstream_fin(sstream);
/* Get FIN frame */
for (i = 0; i < 2; ++i) {
num_iov = OSSL_NELEM(iov);
if (!TEST_true(ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov,
&num_iov))
|| !TEST_uint64_t_eq(hdr.offset, 16)
|| !TEST_uint64_t_eq(hdr.len, 0)
|| !TEST_true(hdr.is_fin)
|| !TEST_size_t_eq(num_iov, 0))
goto err;
}
if (!TEST_true(ossl_quic_sstream_mark_transmitted_fin(sstream, 16)))
goto err;
/* Read more data; FIN should not be returned any more */
num_iov = OSSL_NELEM(iov);
if (!TEST_false(ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov,
&num_iov)))
goto err;
/* Lose FIN frame */
if (!TEST_true(ossl_quic_sstream_mark_lost_fin(sstream)))
goto err;
/* Get FIN frame */
for (i = 0; i < 2; ++i) {
num_iov = OSSL_NELEM(iov);
if (!TEST_true(ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov,
&num_iov))
|| !TEST_uint64_t_eq(hdr.offset, 16)
|| !TEST_uint64_t_eq(hdr.len, 0)
|| !TEST_true(hdr.is_fin)
|| !TEST_size_t_eq(num_iov, 0))
goto err;
}
if (!TEST_true(ossl_quic_sstream_mark_transmitted_fin(sstream, 16)))
goto err;
/* Read more data; FIN should not be returned any more */
num_iov = OSSL_NELEM(iov);
if (!TEST_false(ossl_quic_sstream_get_stream_frame(sstream, 0, &hdr, iov,
&num_iov)))
goto err;
/* Acknowledge fin. */
if (!TEST_true(ossl_quic_sstream_mark_acked_fin(sstream)))
goto err;
testresult = 1;
err:
ossl_quic_sstream_free(sstream);
return testresult;
}
static int test_bulk(int idx)
{
int testresult = 0;
QUIC_SSTREAM *sstream = NULL;
OSSL_QUIC_FRAME_STREAM hdr;
OSSL_QTX_IOVEC iov[2];
size_t i, j, num_iov = 0, init_size = 8192, total_written = 0, l;
size_t consumed = 0, rd, frame_rd, expected = 0;
unsigned char *src_buf = NULL, *dst_buf = NULL;
unsigned char *ref_src_buf = NULL, *ref_dst_buf = NULL;
unsigned char *ref_dst_cur, *ref_src_cur, *dst_cur;
if (!TEST_ptr(sstream = ossl_quic_sstream_new(init_size)))
goto err;
if (!TEST_size_t_eq(ossl_quic_sstream_get_buffer_size(sstream), init_size))
goto err;
if (!TEST_ptr(src_buf = OPENSSL_zalloc(init_size)))
goto err;
if (!TEST_ptr(dst_buf = OPENSSL_malloc(init_size)))
goto err;
if (!TEST_ptr(ref_src_buf = OPENSSL_malloc(init_size)))
goto err;
if (!TEST_ptr(ref_dst_buf = OPENSSL_malloc(init_size)))
goto err;
/*
* Append a preliminary buffer to allow later code to exercise wraparound.
*/
if (!TEST_true(ossl_quic_sstream_append(sstream, src_buf, init_size / 2,
&consumed))
|| !TEST_size_t_eq(consumed, init_size / 2)
|| !TEST_true(ossl_quic_sstream_mark_transmitted(sstream, 0,
init_size / 2 - 1))
|| !TEST_true(ossl_quic_sstream_mark_acked(sstream, 0,
init_size / 2 - 1)))
goto err;
/* Generate a random buffer. */
for (i = 0; i < init_size; ++i)
src_buf[i] = (unsigned char)(test_random() & 0xFF);
/* Append bytes into the buffer in chunks of random length. */
ref_src_cur = ref_src_buf;
do {
l = (test_random() % init_size) + 1;
if (!TEST_true(ossl_quic_sstream_append(sstream, src_buf, l, &consumed)))
goto err;
memcpy(ref_src_cur, src_buf, consumed);
ref_src_cur += consumed;
total_written += consumed;
} while (consumed > 0);
if (!TEST_size_t_eq(ossl_quic_sstream_get_buffer_used(sstream), init_size)
|| !TEST_size_t_eq(ossl_quic_sstream_get_buffer_avail(sstream), 0))
goto err;
/*
* Randomly select bytes out of the buffer by marking them as transmitted.
* Record the remaining bytes, which should be the sequence of bytes
* returned.
*/
ref_src_cur = ref_src_buf;
ref_dst_cur = ref_dst_buf;
/* The randomly generated data starts at logical offset init_size / 2. */
for (i = 0; i < total_written; ++i) {
if ((test_random() & 1) != 0) {
*ref_dst_cur++ = *ref_src_cur;
++expected;
} else if (!TEST_true(ossl_quic_sstream_mark_transmitted(sstream,
init_size / 2 + i,
init_size / 2 + i)))
goto err;
++ref_src_cur;
}
/* Exercise resize. */
if (!TEST_true(ossl_quic_sstream_set_buffer_size(sstream, init_size * 2))
|| !TEST_true(ossl_quic_sstream_set_buffer_size(sstream, init_size)))
goto err;
/* Readout and verification. */
dst_cur = dst_buf;
for (i = 0, rd = 0; rd < expected; ++i) {
num_iov = OSSL_NELEM(iov);
if (!TEST_true(ossl_quic_sstream_get_stream_frame(sstream, i, &hdr, iov,
&num_iov)))
goto err;
for (j = 0, frame_rd = 0; j < num_iov; ++j) {
if (!TEST_size_t_le(iov[j].buf_len + rd, expected))
goto err;
memcpy(dst_cur, iov[j].buf, iov[j].buf_len);
dst_cur += iov[j].buf_len;
rd += iov[j].buf_len;
frame_rd += iov[j].buf_len;
}
/* The IOV lengths must sum to the frame length reported in hdr. */
if (!TEST_uint64_t_eq(frame_rd, hdr.len))
goto err;
}
if (!TEST_mem_eq(dst_buf, rd, ref_dst_buf, expected))
goto err;
testresult = 1;
err:
OPENSSL_free(src_buf);
OPENSSL_free(dst_buf);
OPENSSL_free(ref_src_buf);
OPENSSL_free(ref_dst_buf);
ossl_quic_sstream_free(sstream);
return testresult;
}
int setup_tests(void)
{
ADD_TEST(test_simple);
ADD_ALL_TESTS(test_bulk, 100);
return 1;
}

View File

@ -0,0 +1,19 @@
#! /usr/bin/env perl
# Copyright 2022 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
use OpenSSL::Test;
use OpenSSL::Test::Utils;
setup("test_quic_stream");
plan skip_all => "QUIC protocol is not supported by this OpenSSL build"
if disabled('quic');
plan tests => 1;
ok(run(test(["quic_stream_test"])));