mirror of https://git.postgresql.org/git/postgresql.git
Avoid running out of memory during hash_create, by not passing a
number-of-buckets that exceeds the size we actually plan to allow
the hash table to grow to.  Per trouble report from Sean Shanny.
commit 7af16b2a25
parent e8aa10ee47
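The fix computes how many entries fit in the SortMem budget and clamps the
bucket-count hint to that cap before calling hash_create, as the diff below
shows. For a rough standalone illustration of the same arithmetic, here is a
minimal sketch; SORT_MEM_KB, ALIGNOF_MAX, ELEM_SZ, and ENTRY_SZ are
illustrative stand-ins, not the backend's actual definitions of SortMem,
MAXALIGN, HASHELEMENT, or DupHashTabEntry:

	#include <math.h>
	#include <stdio.h>

	/* Stand-in values; the real ones come from the backend headers. */
	#define SORT_MEM_KB		1024L	/* assumed sort_mem setting, in kB */
	#define ALIGNOF_MAX		8L		/* assumed MAXALIGN boundary */
	#define MAXALIGN(s)		(((s) + ALIGNOF_MAX - 1) & ~(ALIGNOF_MAX - 1))
	#define ELEM_SZ			16L		/* stand-in for sizeof(HASHELEMENT) */
	#define ENTRY_SZ		24L		/* stand-in for sizeof(DupHashTabEntry) */

	int
	main(void)
	{
		double	plan_rows = 50000000.0;	/* planner estimate, possibly huge */
		long	max_hash;
		long	nbuckets;

		/* How many entries fit in the memory budget (cf. iss_MaxHash). */
		max_hash = (SORT_MEM_KB * 1024L) /
			(MAXALIGN(ELEM_SZ) + MAXALIGN(ENTRY_SZ));

		/* Clamp the size hint before it would reach hash_create. */
		nbuckets = (long) ceil(plan_rows);
		if (nbuckets < 1)
			nbuckets = 1;
		if (nbuckets > max_hash)
			nbuckets = max_hash;

		printf("plan_rows=%.0f -> nbuckets=%ld (cap %ld)\n",
			   plan_rows, nbuckets, max_hash);
		return 0;
	}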
src/backend/executor/nodeIndexscan.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.87 2003/11/29 19:51:48 pgsql Exp $
+ *	  $PostgreSQL: pgsql/src/backend/executor/nodeIndexscan.c,v 1.88 2003/12/30 20:05:05 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -953,22 +953,28 @@ static void
 create_duphash(IndexScanState *node)
 {
 	HASHCTL		hash_ctl;
+	long		nbuckets;
 
+	node->iss_MaxHash = (SortMem * 1024L) /
+		(MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(DupHashTabEntry)));
 	MemSet(&hash_ctl, 0, sizeof(hash_ctl));
 	hash_ctl.keysize = SizeOfIptrData;
 	hash_ctl.entrysize = sizeof(DupHashTabEntry);
 	hash_ctl.hash = tag_hash;
 	hash_ctl.hcxt = CurrentMemoryContext;
+	nbuckets = (long) ceil(node->ss.ps.plan->plan_rows);
+	if (nbuckets < 1)
+		nbuckets = 1;
+	if (nbuckets > node->iss_MaxHash)
+		nbuckets = node->iss_MaxHash;
 	node->iss_DupHash = hash_create("DupHashTable",
-									(long) ceil(node->ss.ps.plan->plan_rows),
+									nbuckets,
 									&hash_ctl,
 									HASH_ELEM | HASH_FUNCTION | HASH_CONTEXT);
 	if (node->iss_DupHash == NULL)
 		ereport(ERROR,
 				(errcode(ERRCODE_OUT_OF_MEMORY),
 				 errmsg("out of memory")));
-	node->iss_MaxHash = (SortMem * 1024L) /
-		(MAXALIGN(sizeof(HASHELEMENT)) + MAXALIGN(sizeof(DupHashTabEntry)));
 }
 
 int
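Note the diff also moves the iss_MaxHash computation from the end of
create_duphash to before the hash_create call: the clamp needs the cap in
hand before the table is created. Previously the uncapped plan_rows estimate
went straight to hash_create as the initial-size hint, which could drive a
very large up-front allocation and exhaust memory, per the trouble report
this commit addresses.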