From fa53723cdb6f0338558e57a2a0a6459c00a1bc5f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20B=C3=A9rat?=
Date: Fri, 29 Nov 2024 14:48:43 +0100
Subject: [PATCH] support: Add support_next_to_fault_before support function

Refactor support_next_to_fault_allocate and add the
support_next_to_fault_allocate_before function, which returns a buffer
with a protected page before it, to be able to test buffer underflow
accesses.

Reviewed-by: Tulio Magno Quites Machado Filho
---
 support/next_to_fault.c | 44 +++++++++++++++++++++++++++++++----------
 support/next_to_fault.h |  5 +++++
 2 files changed, 39 insertions(+), 10 deletions(-)

diff --git a/support/next_to_fault.c b/support/next_to_fault.c
index eb2457d12d..b60d5a5744 100644
--- a/support/next_to_fault.c
+++ b/support/next_to_fault.c
@@ -1,4 +1,4 @@
-/* Memory allocation next to an unmapped page.
+/* Memory allocation either before or after an unmapped page.
    Copyright (C) 2017-2025 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
@@ -16,34 +16,58 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
+#include <stdbool.h>
 #include <support/check.h>
 #include <support/next_to_fault.h>
 #include <support/xunistd.h>
 #include <sys/mman.h>
 #include <sys/param.h>
 
-struct support_next_to_fault
-support_next_to_fault_allocate (size_t size)
+static struct support_next_to_fault
+support_next_to_fault_allocate_any (size_t size, bool fault_after_alloc)
 {
   long page_size = sysconf (_SC_PAGE_SIZE);
+  long protect_offset = 0;
+  long buffer_offset = page_size;
+
   TEST_VERIFY_EXIT (page_size > 0);
   struct support_next_to_fault result;
   result.region_size = roundup (size, page_size) + page_size;
   if (size + page_size <= size || result.region_size <= size)
-    FAIL_EXIT1 ("support_next_to_fault_allocate (%zu): overflow", size);
+    FAIL_EXIT1 ("%s (%zu): overflow", __func__, size);
   result.region_start
     = xmmap (NULL, result.region_size, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1);
-  /* Unmap the page after the allocation.
-     */
-  xmprotect (result.region_start + (result.region_size - page_size),
-             page_size, PROT_NONE);
-  /* Align the allocation within the region so that it ends just
-     before the PROT_NONE page.  */
-  result.buffer = result.region_start + result.region_size - page_size - size;
+
+  if (fault_after_alloc)
+    {
+      protect_offset = result.region_size - page_size;
+      buffer_offset = protect_offset - size;
+    }
+
+  /* Unmap the page before or after the allocation.  */
+  xmprotect (result.region_start + protect_offset, page_size, PROT_NONE);
+  /* Align the allocation within the region so that it starts after or ends
+     just before the PROT_NONE page.  */
+  result.buffer = result.region_start + buffer_offset;
   result.length = size;
   return result;
 }
 
+/* Unmap a page after the buffer.  */
+struct support_next_to_fault
+support_next_to_fault_allocate (size_t size)
+{
+  return support_next_to_fault_allocate_any (size, true);
+}
+
+/* Unmap a page before the buffer.  */
+struct support_next_to_fault
+support_next_to_fault_allocate_before (size_t size)
+{
+  return support_next_to_fault_allocate_any (size, false);
+}
+
 void
 support_next_to_fault_free (struct support_next_to_fault *ntf)
 {
diff --git a/support/next_to_fault.h b/support/next_to_fault.h
index 7112cc4c65..cec4b97332 100644
--- a/support/next_to_fault.h
+++ b/support/next_to_fault.h
@@ -41,6 +41,11 @@ struct support_next_to_fault
    fault).  */
 struct support_next_to_fault support_next_to_fault_allocate (size_t size);
 
+/* Allocate a buffer of SIZE bytes just *after* a page which is mapped
+   with PROT_NONE (so that under-running the buffer will cause a
+   fault).  */
+struct support_next_to_fault support_next_to_fault_allocate_before (size_t size);
+
 /* Deallocate the memory region allocated by next_to_fault_allocate.  */
 void support_next_to_fault_free (struct support_next_to_fault *);