diff --git a/support/next_to_fault.c b/support/next_to_fault.c
index eb2457d12d..b60d5a5744 100644
--- a/support/next_to_fault.c
+++ b/support/next_to_fault.c
@@ -1,4 +1,4 @@
-/* Memory allocation next to an unmapped page.
+/* Memory allocation either before or after an unmapped page.
    Copyright (C) 2017-2025 Free Software Foundation, Inc.
    This file is part of the GNU C Library.
 
@@ -16,34 +16,58 @@
    License along with the GNU C Library; if not, see
    <https://www.gnu.org/licenses/>.  */
 
+#include <stdbool.h>
 #include <support/next_to_fault.h>
 #include <support/xunistd.h>
 #include <sys/mman.h>
 #include <sys/param.h>
 #include <unistd.h>
 
-struct support_next_to_fault
-support_next_to_fault_allocate (size_t size)
+static struct support_next_to_fault
+support_next_to_fault_allocate_any (size_t size, bool fault_after_alloc)
 {
   long page_size = sysconf (_SC_PAGE_SIZE);
+  long protect_offset = 0;
+  long buffer_offset = page_size;
+
   TEST_VERIFY_EXIT (page_size > 0);
   struct support_next_to_fault result;
   result.region_size = roundup (size, page_size) + page_size;
   if (size + page_size <= size || result.region_size <= size)
-    FAIL_EXIT1 ("support_next_to_fault_allocate (%zu): overflow", size);
+    FAIL_EXIT1 ("%s (%zu): overflow", __func__, size);
   result.region_start
     = xmmap (NULL, result.region_size, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1);
-  /* Unmap the page after the allocation.  */
-  xmprotect (result.region_start + (result.region_size - page_size),
-             page_size, PROT_NONE);
-  /* Align the allocation within the region so that it ends just
-     before the PROT_NONE page.  */
-  result.buffer = result.region_start + result.region_size - page_size - size;
+
+  if (fault_after_alloc)
+    {
+      protect_offset = result.region_size - page_size;
+      buffer_offset = protect_offset - size;
+    }
+
+  /* Unmap the page before or after the allocation.  */
+  xmprotect (result.region_start + protect_offset, page_size, PROT_NONE);
+  /* Align the allocation within the region so that it starts after or ends
+     just before the PROT_NONE page.  */
+  result.buffer = result.region_start + buffer_offset;
   result.length = size;
   return result;
 }
 
+/* Unmap a page after the buffer (overruns will fault).  */
+struct support_next_to_fault
+support_next_to_fault_allocate (size_t size)
+{
+  return support_next_to_fault_allocate_any (size, true);
+}
+
+/* Unmap a page before the buffer (underruns will fault).  */
+struct support_next_to_fault
+support_next_to_fault_allocate_before (size_t size)
+{
+  return support_next_to_fault_allocate_any (size, false);
+}
+
 void
 support_next_to_fault_free (struct support_next_to_fault *ntf)
 {
diff --git a/support/next_to_fault.h b/support/next_to_fault.h
index 7112cc4c65..cec4b97332 100644
--- a/support/next_to_fault.h
+++ b/support/next_to_fault.h
@@ -41,6 +41,11 @@ struct support_next_to_fault
    fault).  */
 struct support_next_to_fault support_next_to_fault_allocate (size_t size);
 
+/* Allocate a buffer of SIZE bytes just *after* a page which is mapped
+   with PROT_NONE (so that under-running the buffer will cause a
+   fault).  */
+struct support_next_to_fault support_next_to_fault_allocate_before (size_t size);
+
 /* Deallocate the memory region allocated by next_to_fault_allocate.  */
 void support_next_to_fault_free (struct support_next_to_fault *);
 