Mirror of https://git.postgresql.org/git/postgresql.git (synced 2025-01-12 18:34:36 +08:00)

Permit super-MaxAllocSize allocations with MemoryContextAllocHuge().
The MaxAllocSize guard is convenient for most callers, because it reduces the need for careful attention to overflow, data type selection, and the SET_VARSIZE() limit.  A handful of callers are happy to navigate those hazards in exchange for the ability to allocate a larger chunk.  Introduce MemoryContextAllocHuge() and repalloc_huge().  Use this in tuplesort.c and tuplestore.c, enabling internal sorts of up to INT_MAX tuples, a factor-of-48 increase.  In particular, B-tree index builds can now benefit from much-larger maintenance_work_mem settings.

Reviewed by Stephen Frost, Simon Riggs and Jeff Janes.
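A minimal usage sketch of the new entry points (an editorial illustration, not code from this commit; the helper and the names cxt, arr, and nelems are hypothetical):

#include "postgres.h"
#include "utils/memutils.h"		/* MaxAllocHugeSize */

/*
 * Hypothetical helper: (re)allocate a Datum array that may exceed the 1 GB
 * MaxAllocSize cap.  The caller, not palloc, polices overflow and the
 * MaxAllocHugeSize limit.
 */
static Datum *
grow_datum_array(MemoryContext cxt, Datum *arr, Size nelems)
{
	Size		newsize;

	/* divide first so the multiplication below cannot overflow Size */
	if (nelems > MaxAllocHugeSize / sizeof(Datum))
		elog(ERROR, "requested array size is too large");
	newsize = nelems * sizeof(Datum);

	if (arr == NULL)
		return (Datum *) MemoryContextAllocHuge(cxt, newsize);
	return (Datum *) repalloc_huge(arr, newsize);
}

(For scale: assuming sizeof(SortTuple) is 24 bytes on a typical 64-bit build, the old MaxAllocSize cap limited an internal sort to roughly 44 million in-memory tuples, while INT_MAX is about 2.1 billion; that ratio is the factor-of-48 increase cited above.)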
This commit is contained in:
parent 9ef86cd994
commit 263865a489
@@ -458,6 +458,7 @@ AllocSetContextCreate(MemoryContext parent,
 	maxBlockSize = MAXALIGN(maxBlockSize);
 	if (maxBlockSize < initBlockSize)
 		maxBlockSize = initBlockSize;
+	Assert(AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
 	context->initBlockSize = initBlockSize;
 	context->maxBlockSize = maxBlockSize;
 	context->nextBlockSize = initBlockSize;
@@ -643,6 +644,10 @@ AllocSetDelete(MemoryContext context)
  * AllocSetAlloc
  *		Returns pointer to allocated memory of given size; memory is added
  *		to the set.
+ *
+ * No request may exceed:
+ *		MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
+ * All callers use a much-lower limit.
  */
 static void *
 AllocSetAlloc(MemoryContext context, Size size)
@@ -455,14 +455,7 @@ MemoryContextContains(MemoryContext context, void *pointer)
 	header = (StandardChunkHeader *)
 		((char *) pointer - STANDARDCHUNKHEADERSIZE);
 
-	/*
-	 * If the context link doesn't match then we certainly have a non-member
-	 * chunk.  Also check for a reasonable-looking size as extra guard against
-	 * being fooled by bogus pointers.
-	 */
-	if (header->context == context && AllocSizeIsValid(header->size))
-		return true;
-	return false;
+	return header->context == context;
 }
 
 /*--------------------
@@ -757,6 +750,71 @@ repalloc(void *pointer, Size size)
 	return ret;
 }
 
+/*
+ * MemoryContextAllocHuge
+ *		Allocate (possibly-expansive) space within the specified context.
+ *
+ * See considerations in comment at MaxAllocHugeSize.
+ */
+void *
+MemoryContextAllocHuge(MemoryContext context, Size size)
+{
+	void	   *ret;
+
+	AssertArg(MemoryContextIsValid(context));
+
+	if (!AllocHugeSizeIsValid(size))
+		elog(ERROR, "invalid memory alloc request size %lu",
+			 (unsigned long) size);
+
+	context->isReset = false;
+
+	ret = (*context->methods->alloc) (context, size);
+	VALGRIND_MEMPOOL_ALLOC(context, ret, size);
+
+	return ret;
+}
+
+/*
+ * repalloc_huge
+ *		Adjust the size of a previously allocated chunk, permitting a large
+ *		value.  The previous allocation need not have been "huge".
+ */
+void *
+repalloc_huge(void *pointer, Size size)
+{
+	MemoryContext context;
+	void	   *ret;
+
+	if (!AllocHugeSizeIsValid(size))
+		elog(ERROR, "invalid memory alloc request size %lu",
+			 (unsigned long) size);
+
+	/*
+	 * Try to detect bogus pointers handed to us, poorly though we can.
+	 * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
+	 * allocated chunk.
+	 */
+	Assert(pointer != NULL);
+	Assert(pointer == (void *) MAXALIGN(pointer));
+
+	/*
+	 * OK, it's probably safe to look at the chunk header.
+	 */
+	context = ((StandardChunkHeader *)
+			   ((char *) pointer - STANDARDCHUNKHEADERSIZE))->context;
+
+	AssertArg(MemoryContextIsValid(context));
+
+	/* isReset must be false already */
+	Assert(!context->isReset);
+
+	ret = (*context->methods->realloc) (context, pointer, size);
+	VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
+
+	return ret;
+}
+
 /*
  * MemoryContextStrdup
  *		Like strdup(), but allocate from the specified context
@@ -211,8 +211,8 @@ struct Tuplesortstate
 								 * tuples to return? */
 	bool		boundUsed;		/* true if we made use of a bounded heap */
 	int			bound;			/* if bounded, the maximum number of tuples */
-	long		availMem;		/* remaining memory available, in bytes */
-	long		allowedMem;		/* total memory allowed, in bytes */
+	Size		availMem;		/* remaining memory available, in bytes */
+	Size		allowedMem;		/* total memory allowed, in bytes */
 	int			maxTapes;		/* number of tapes (Knuth's T) */
 	int			tapeRange;		/* maxTapes-1 (Knuth's P) */
 	MemoryContext sortcontext;	/* memory context holding all sort data */
@@ -308,7 +308,7 @@ struct Tuplesortstate
 	int		   *mergenext;		/* first preread tuple for each source */
 	int		   *mergelast;		/* last preread tuple for each source */
 	int		   *mergeavailslots;	/* slots left for prereading each tape */
-	long	   *mergeavailmem;	/* availMem for prereading each tape */
+	Size	   *mergeavailmem;	/* availMem for prereading each tape */
 	int			mergefreelist;	/* head of freelist of recycled slots */
 	int			mergefirstfree; /* first slot never used in this merge */
@@ -961,25 +961,26 @@ tuplesort_end(Tuplesortstate *state)
 }
 
 /*
- * Grow the memtuples[] array, if possible within our memory constraint.
- * Return TRUE if we were able to enlarge the array, FALSE if not.
+ * Grow the memtuples[] array, if possible within our memory constraint.  We
+ * must not exceed INT_MAX tuples in memory or the caller-provided memory
+ * limit.  Return TRUE if we were able to enlarge the array, FALSE if not.
  *
- * Normally, at each increment we double the size of the array.  When we no
- * longer have enough memory to do that, we attempt one last, smaller increase
- * (and then clear the growmemtuples flag so we don't try any more).  That
- * allows us to use allowedMem as fully as possible; sticking to the pure
- * doubling rule could result in almost half of allowedMem going unused.
- * Because availMem moves around with tuple addition/removal, we need some
- * rule to prevent making repeated small increases in memtupsize, which would
- * just be useless thrashing.  The growmemtuples flag accomplishes that and
- * also prevents useless recalculations in this function.
+ * Normally, at each increment we double the size of the array.  When doing
+ * that would exceed a limit, we attempt one last, smaller increase (and then
+ * clear the growmemtuples flag so we don't try any more).  That allows us to
+ * use memory as fully as permitted; sticking to the pure doubling rule could
+ * result in almost half going unused.  Because availMem moves around with
+ * tuple addition/removal, we need some rule to prevent making repeated small
+ * increases in memtupsize, which would just be useless thrashing.  The
+ * growmemtuples flag accomplishes that and also prevents useless
+ * recalculations in this function.
  */
 static bool
 grow_memtuples(Tuplesortstate *state)
 {
 	int			newmemtupsize;
 	int			memtupsize = state->memtupsize;
-	long		memNowUsed = state->allowedMem - state->availMem;
+	Size		memNowUsed = state->allowedMem - state->availMem;
 
 	/* Forget it if we've already maxed out memtuples, per comment above */
 	if (!state->growmemtuples)
@@ -989,14 +990,16 @@ grow_memtuples(Tuplesortstate *state)
 	if (memNowUsed <= state->availMem)
 	{
 		/*
-		 * It is surely safe to double memtupsize if we've used no more than
-		 * half of allowedMem.
-		 *
-		 * Note: it might seem that we need to worry about memtupsize * 2
-		 * overflowing an int, but the MaxAllocSize clamp applied below
-		 * ensures the existing memtupsize can't be large enough for that.
+		 * We've used no more than half of allowedMem; double our usage,
+		 * clamping at INT_MAX.
 		 */
-		newmemtupsize = memtupsize * 2;
+		if (memtupsize < INT_MAX / 2)
+			newmemtupsize = memtupsize * 2;
+		else
+		{
+			newmemtupsize = INT_MAX;
+			state->growmemtuples = false;
+		}
 	}
 	else
 	{
@@ -1012,7 +1015,8 @@ grow_memtuples(Tuplesortstate *state)
 		 * we've already seen, and thus we can extrapolate from the space
 		 * consumption so far to estimate an appropriate new size for the
 		 * memtuples array.  The optimal value might be higher or lower than
-		 * this estimate, but it's hard to know that in advance.
+		 * this estimate, but it's hard to know that in advance.  We again
+		 * clamp at INT_MAX tuples.
 		 *
 		 * This calculation is safe against enlarging the array so much that
 		 * LACKMEM becomes true, because the memory currently used includes
@@ -1020,16 +1024,18 @@ grow_memtuples(Tuplesortstate *state)
 		 * new array elements even if no other memory were currently used.
 		 *
 		 * We do the arithmetic in float8, because otherwise the product of
-		 * memtupsize and allowedMem could overflow.  (A little algebra shows
-		 * that grow_ratio must be less than 2 here, so we are not risking
-		 * integer overflow this way.)  Any inaccuracy in the result should be
-		 * insignificant; but even if we computed a completely insane result,
-		 * the checks below will prevent anything really bad from happening.
+		 * memtupsize and allowedMem could overflow.  Any inaccuracy in the
+		 * result should be insignificant; but even if we computed a
+		 * completely insane result, the checks below will prevent anything
+		 * really bad from happening.
 		 */
 		double		grow_ratio;
 
 		grow_ratio = (double) state->allowedMem / (double) memNowUsed;
-		newmemtupsize = (int) (memtupsize * grow_ratio);
+		if (memtupsize * grow_ratio < INT_MAX)
+			newmemtupsize = (int) (memtupsize * grow_ratio);
+		else
+			newmemtupsize = INT_MAX;
 
 		/* We won't make any further enlargement attempts */
 		state->growmemtuples = false;
@@ -1040,12 +1046,13 @@ grow_memtuples(Tuplesortstate *state)
 		goto noalloc;
 
 	/*
-	 * On a 64-bit machine, allowedMem could be more than MaxAllocSize.  Clamp
-	 * to ensure our request won't be rejected by palloc.
+	 * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize.  Clamp
+	 * to ensure our request won't be rejected.  Note that we can easily
+	 * exhaust address space before facing this outcome.
 	 */
-	if ((Size) newmemtupsize >= MaxAllocSize / sizeof(SortTuple))
+	if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
 	{
-		newmemtupsize = (int) (MaxAllocSize / sizeof(SortTuple));
+		newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
 		state->growmemtuples = false;	/* can't grow any more */
 	}
 
@@ -1060,15 +1067,15 @@ grow_memtuples(Tuplesortstate *state)
 	 * palloc would be treating both old and new arrays as separate chunks.
 	 * But we'll check LACKMEM explicitly below just in case.)
 	 */
-	if (state->availMem < (long) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
+	if (state->availMem < (Size) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
 		goto noalloc;
 
 	/* OK, do it */
 	FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
 	state->memtupsize = newmemtupsize;
 	state->memtuples = (SortTuple *)
-		repalloc(state->memtuples,
-				 state->memtupsize * sizeof(SortTuple));
+		repalloc_huge(state->memtuples,
+					  state->memtupsize * sizeof(SortTuple));
 	USEMEM(state, GetMemoryChunkSpace(state->memtuples));
 	if (LACKMEM(state))
 		elog(ERROR, "unexpected out-of-memory situation during sort");
@@ -1715,7 +1722,7 @@ tuplesort_getdatum(Tuplesortstate *state, bool forward,
  * This is exported for use by the planner.  allowedMem is in bytes.
  */
 int
-tuplesort_merge_order(long allowedMem)
+tuplesort_merge_order(Size allowedMem)
 {
 	int			mOrder;
 
@@ -1749,7 +1756,7 @@ inittapes(Tuplesortstate *state)
 	int			maxTapes,
 				ntuples,
 				j;
-	long		tapeSpace;
+	Size		tapeSpace;
 
 	/* Compute number of tapes to use: merge order plus 1 */
 	maxTapes = tuplesort_merge_order(state->allowedMem) + 1;
@@ -1798,7 +1805,7 @@ inittapes(Tuplesortstate *state)
 	state->mergenext = (int *) palloc0(maxTapes * sizeof(int));
 	state->mergelast = (int *) palloc0(maxTapes * sizeof(int));
 	state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int));
-	state->mergeavailmem = (long *) palloc0(maxTapes * sizeof(long));
+	state->mergeavailmem = (Size *) palloc0(maxTapes * sizeof(Size));
 	state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
 	state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
 	state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));
@@ -2026,7 +2033,7 @@ mergeonerun(Tuplesortstate *state)
 	int			srcTape;
 	int			tupIndex;
 	SortTuple  *tup;
-	long		priorAvail,
+	Size		priorAvail,
 				spaceFreed;
 
 	/*
@@ -2100,7 +2107,7 @@ beginmerge(Tuplesortstate *state)
 	int			tapenum;
 	int			srcTape;
 	int			slotsPerTape;
-	long		spacePerTape;
+	Size		spacePerTape;
 
 	/* Heap should be empty here */
 	Assert(state->memtupcount == 0);
@@ -2221,7 +2228,7 @@ mergeprereadone(Tuplesortstate *state, int srcTape)
 	unsigned int tuplen;
 	SortTuple	stup;
 	int			tupIndex;
-	long		priorAvail,
+	Size		priorAvail,
 				spaceUsed;
 
 	if (!state->mergeactive[srcTape])
@@ -104,8 +104,8 @@ struct Tuplestorestate
 	bool		backward;		/* store extra length words in file? */
 	bool		interXact;		/* keep open through transactions? */
 	bool		truncated;		/* tuplestore_trim has removed tuples? */
-	long		availMem;		/* remaining memory available, in bytes */
-	long		allowedMem;		/* total memory allowed, in bytes */
+	Size		availMem;		/* remaining memory available, in bytes */
+	Size		allowedMem;		/* total memory allowed, in bytes */
 	BufFile    *myfile;			/* underlying file, or NULL if none */
 	MemoryContext context;		/* memory context for holding tuples */
 	ResourceOwner resowner;		/* resowner for holding temp files */
@@ -531,25 +531,26 @@ tuplestore_ateof(Tuplestorestate *state)
 }
 
 /*
- * Grow the memtuples[] array, if possible within our memory constraint.
- * Return TRUE if we were able to enlarge the array, FALSE if not.
+ * Grow the memtuples[] array, if possible within our memory constraint.  We
+ * must not exceed INT_MAX tuples in memory or the caller-provided memory
+ * limit.  Return TRUE if we were able to enlarge the array, FALSE if not.
  *
- * Normally, at each increment we double the size of the array.  When we no
- * longer have enough memory to do that, we attempt one last, smaller increase
- * (and then clear the growmemtuples flag so we don't try any more).  That
- * allows us to use allowedMem as fully as possible; sticking to the pure
- * doubling rule could result in almost half of allowedMem going unused.
- * Because availMem moves around with tuple addition/removal, we need some
- * rule to prevent making repeated small increases in memtupsize, which would
- * just be useless thrashing.  The growmemtuples flag accomplishes that and
- * also prevents useless recalculations in this function.
+ * Normally, at each increment we double the size of the array.  When doing
+ * that would exceed a limit, we attempt one last, smaller increase (and then
+ * clear the growmemtuples flag so we don't try any more).  That allows us to
+ * use memory as fully as permitted; sticking to the pure doubling rule could
+ * result in almost half going unused.  Because availMem moves around with
+ * tuple addition/removal, we need some rule to prevent making repeated small
+ * increases in memtupsize, which would just be useless thrashing.  The
+ * growmemtuples flag accomplishes that and also prevents useless
+ * recalculations in this function.
  */
 static bool
 grow_memtuples(Tuplestorestate *state)
 {
 	int			newmemtupsize;
 	int			memtupsize = state->memtupsize;
-	long		memNowUsed = state->allowedMem - state->availMem;
+	Size		memNowUsed = state->allowedMem - state->availMem;
 
 	/* Forget it if we've already maxed out memtuples, per comment above */
 	if (!state->growmemtuples)
@@ -559,14 +560,16 @@ grow_memtuples(Tuplestorestate *state)
 	if (memNowUsed <= state->availMem)
 	{
 		/*
-		 * It is surely safe to double memtupsize if we've used no more than
-		 * half of allowedMem.
-		 *
-		 * Note: it might seem that we need to worry about memtupsize * 2
-		 * overflowing an int, but the MaxAllocSize clamp applied below
-		 * ensures the existing memtupsize can't be large enough for that.
+		 * We've used no more than half of allowedMem; double our usage,
+		 * clamping at INT_MAX.
 		 */
-		newmemtupsize = memtupsize * 2;
+		if (memtupsize < INT_MAX / 2)
+			newmemtupsize = memtupsize * 2;
+		else
+		{
+			newmemtupsize = INT_MAX;
+			state->growmemtuples = false;
+		}
 	}
 	else
 	{
@@ -582,7 +585,8 @@ grow_memtuples(Tuplestorestate *state)
 		 * we've already seen, and thus we can extrapolate from the space
 		 * consumption so far to estimate an appropriate new size for the
 		 * memtuples array.  The optimal value might be higher or lower than
-		 * this estimate, but it's hard to know that in advance.
+		 * this estimate, but it's hard to know that in advance.  We again
+		 * clamp at INT_MAX tuples.
 		 *
 		 * This calculation is safe against enlarging the array so much that
 		 * LACKMEM becomes true, because the memory currently used includes
@@ -590,16 +594,18 @@ grow_memtuples(Tuplestorestate *state)
 		 * new array elements even if no other memory were currently used.
 		 *
 		 * We do the arithmetic in float8, because otherwise the product of
-		 * memtupsize and allowedMem could overflow.  (A little algebra shows
-		 * that grow_ratio must be less than 2 here, so we are not risking
-		 * integer overflow this way.)  Any inaccuracy in the result should be
-		 * insignificant; but even if we computed a completely insane result,
-		 * the checks below will prevent anything really bad from happening.
+		 * memtupsize and allowedMem could overflow.  Any inaccuracy in the
+		 * result should be insignificant; but even if we computed a
+		 * completely insane result, the checks below will prevent anything
+		 * really bad from happening.
 		 */
 		double		grow_ratio;
 
 		grow_ratio = (double) state->allowedMem / (double) memNowUsed;
-		newmemtupsize = (int) (memtupsize * grow_ratio);
+		if (memtupsize * grow_ratio < INT_MAX)
+			newmemtupsize = (int) (memtupsize * grow_ratio);
+		else
+			newmemtupsize = INT_MAX;
 
 		/* We won't make any further enlargement attempts */
 		state->growmemtuples = false;
@@ -610,12 +616,13 @@ grow_memtuples(Tuplestorestate *state)
 		goto noalloc;
 
 	/*
-	 * On a 64-bit machine, allowedMem could be more than MaxAllocSize.  Clamp
-	 * to ensure our request won't be rejected by palloc.
+	 * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize.  Clamp
+	 * to ensure our request won't be rejected.  Note that we can easily
+	 * exhaust address space before facing this outcome.
 	 */
-	if ((Size) newmemtupsize >= MaxAllocSize / sizeof(void *))
+	if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(void *))
 	{
-		newmemtupsize = (int) (MaxAllocSize / sizeof(void *));
+		newmemtupsize = (int) (MaxAllocHugeSize / sizeof(void *));
 		state->growmemtuples = false;	/* can't grow any more */
 	}
 
@@ -630,15 +637,15 @@ grow_memtuples(Tuplestorestate *state)
 	 * palloc would be treating both old and new arrays as separate chunks.
 	 * But we'll check LACKMEM explicitly below just in case.)
 	 */
-	if (state->availMem < (long) ((newmemtupsize - memtupsize) * sizeof(void *)))
+	if (state->availMem < (Size) ((newmemtupsize - memtupsize) * sizeof(void *)))
 		goto noalloc;
 
 	/* OK, do it */
 	FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
 	state->memtupsize = newmemtupsize;
 	state->memtuples = (void **)
-		repalloc(state->memtuples,
-				 state->memtupsize * sizeof(void *));
+		repalloc_huge(state->memtuples,
+					  state->memtupsize * sizeof(void *));
 	USEMEM(state, GetMemoryChunkSpace(state->memtuples));
 	if (LACKMEM(state))
 		elog(ERROR, "unexpected out-of-memory situation during sort");
@@ -21,26 +21,30 @@
 
 
 /*
- * MaxAllocSize
- *		Quasi-arbitrary limit on size of allocations.
+ * MaxAllocSize, MaxAllocHugeSize
+ *		Quasi-arbitrary limits on size of allocations.
  *
  * Note:
- *		There is no guarantee that allocations smaller than MaxAllocSize
- *		will succeed.  Allocation requests larger than MaxAllocSize will
- *		be summarily denied.
+ *		There is no guarantee that smaller allocations will succeed, but
+ *		larger requests will be summarily denied.
  *
- * XXX This is deliberately chosen to correspond to the limiting size
- * of varlena objects under TOAST.  See VARSIZE_4B() and related macros
- * in postgres.h.  Many datatypes assume that any allocatable size can
- * be represented in a varlena header.
- *
- * XXX Also, various places in aset.c assume they can compute twice an
- * allocation's size without overflow, so beware of raising this.
+ * palloc() enforces MaxAllocSize, chosen to correspond to the limiting size
+ * of varlena objects under TOAST.  See VARSIZE_4B() and related macros in
+ * postgres.h.  Many datatypes assume that any allocatable size can be
+ * represented in a varlena header.  This limit also permits a caller to use
+ * an "int" variable for an index into or length of an allocation.  Callers
+ * careful to avoid these hazards can access the higher limit with
+ * MemoryContextAllocHuge().  Both limits permit code to assume that it may
+ * compute twice an allocation's size without overflow.
  */
 #define MaxAllocSize	((Size) 0x3fffffff)		/* 1 gigabyte - 1 */
 
 #define AllocSizeIsValid(size)	((Size) (size) <= MaxAllocSize)
 
+#define MaxAllocHugeSize	((Size) -1 >> 1)	/* SIZE_MAX / 2 */
+
+#define AllocHugeSizeIsValid(size)	((Size) (size) <= MaxAllocHugeSize)
+
 /*
  * All chunks allocated by any memory context manager are required to be
  * preceded by a StandardChunkHeader at a spacing of STANDARDCHUNKHEADERSIZE.
@@ -51,6 +51,10 @@ extern void *MemoryContextAlloc(MemoryContext context, Size size);
 extern void *MemoryContextAllocZero(MemoryContext context, Size size);
 extern void *MemoryContextAllocZeroAligned(MemoryContext context, Size size);
 
+/* Higher-limit allocators. */
+extern void *MemoryContextAllocHuge(MemoryContext context, Size size);
+extern void *repalloc_huge(void *pointer, Size size);
+
 /*
  * The result of palloc() is always word-aligned, so we can skip testing
  * alignment of the pointer when deciding which MemSet variant to use.
@@ -106,7 +106,7 @@ extern void tuplesort_get_stats(Tuplesortstate *state,
 				const char **spaceType,
 				long *spaceUsed);
 
-extern int	tuplesort_merge_order(long allowedMem);
+extern int	tuplesort_merge_order(Size allowedMem);
 
 /*
  * These routines may only be called if randomAccess was specified 'true'.