From 426ea611171da4e60ab4f3863fa3cc3683ae9547 Mon Sep 17 00:00:00 2001
From: Peter Geoghegan
Date: Tue, 11 Mar 2025 10:35:56 -0400
Subject: [PATCH] nbtree: Make BTMaxItemSize into object-like macro.

Make nbtree's "1/3 of a page limit" BTMaxItemSize function-like macro
(which accepts a "page" argument) into an object-like macro that can be
used from code that doesn't have convenient access to an nbtree page.

Preparation for an upcoming patch that adds skip scan to nbtree.
Parallel index scans that use skip scan will serialize datums (not just
SAOP array subscripts) when scheduling primitive scans. BTMaxItemSize
will be used by btestimateparallelscan to determine how much DSM to
request.

Author: Peter Geoghegan
Discussion: https://postgr.es/m/CAH2-Wz=H_RG5weNGeUG_TkK87tRBnH9mGCQj6WpM4V4FNWKv2g@mail.gmail.com
---
 contrib/amcheck/verify_nbtree.c       | 3 +--
 src/backend/access/nbtree/nbtdedup.c  | 6 +++---
 src/backend/access/nbtree/nbtinsert.c | 2 +-
 src/backend/access/nbtree/nbtsort.c   | 4 ++--
 src/backend/access/nbtree/nbtutils.c  | 7 +++----
 src/backend/access/nbtree/nbtxlog.c   | 2 +-
 src/include/access/nbtree.h           | 8 ++++----
 7 files changed, 15 insertions(+), 17 deletions(-)

diff --git a/contrib/amcheck/verify_nbtree.c b/contrib/amcheck/verify_nbtree.c
index aac8c74f546..825b677c47c 100644
--- a/contrib/amcheck/verify_nbtree.c
+++ b/contrib/amcheck/verify_nbtree.c
@@ -1597,8 +1597,7 @@ bt_target_page_check(BtreeCheckState *state)
 		 */
 		lowersizelimit = skey->heapkeyspace &&
 			(P_ISLEAF(topaque) || BTreeTupleGetHeapTID(itup) == NULL);
-		if (tupsize > (lowersizelimit ? BTMaxItemSize(state->target) :
-					   BTMaxItemSizeNoHeapTid(state->target)))
+		if (tupsize > (lowersizelimit ? BTMaxItemSize : BTMaxItemSizeNoHeapTid))
 		{
 			ItemPointer tid = BTreeTupleGetPointsToTID(itup);
 			char	   *itid,
diff --git a/src/backend/access/nbtree/nbtdedup.c b/src/backend/access/nbtree/nbtdedup.c
index cbe73675f86..08884116aec 100644
--- a/src/backend/access/nbtree/nbtdedup.c
+++ b/src/backend/access/nbtree/nbtdedup.c
@@ -84,7 +84,7 @@ _bt_dedup_pass(Relation rel, Buffer buf, IndexTuple newitem, Size newitemsz,
 	state = (BTDedupState) palloc(sizeof(BTDedupStateData));
 	state->deduplicate = true;
 	state->nmaxitems = 0;
-	state->maxpostingsize = Min(BTMaxItemSize(page) / 2, INDEX_SIZE_MASK);
+	state->maxpostingsize = Min(BTMaxItemSize / 2, INDEX_SIZE_MASK);
 	/* Metadata about base tuple of current pending posting list */
 	state->base = NULL;
 	state->baseoff = InvalidOffsetNumber;
@@ -568,7 +568,7 @@ _bt_dedup_finish_pending(Page newpage, BTDedupState state)
 		/* Use original, unchanged base tuple */
 		tuplesz = IndexTupleSize(state->base);
 		Assert(tuplesz == MAXALIGN(IndexTupleSize(state->base)));
-		Assert(tuplesz <= BTMaxItemSize(newpage));
+		Assert(tuplesz <= BTMaxItemSize);
 		if (PageAddItem(newpage, (Item) state->base, tuplesz, tupoff,
 						false, false) == InvalidOffsetNumber)
 			elog(ERROR, "deduplication failed to add tuple to page");
@@ -588,7 +588,7 @@ _bt_dedup_finish_pending(Page newpage, BTDedupState state)
 		state->intervals[state->nintervals].nitems = state->nitems;
 
 		Assert(tuplesz == MAXALIGN(IndexTupleSize(final)));
-		Assert(tuplesz <= BTMaxItemSize(newpage));
+		Assert(tuplesz <= BTMaxItemSize);
 		if (PageAddItem(newpage, (Item) final, tuplesz, tupoff,
 						false, false) == InvalidOffsetNumber)
 			elog(ERROR, "deduplication failed to add tuple to page");
diff --git a/src/backend/access/nbtree/nbtinsert.c b/src/backend/access/nbtree/nbtinsert.c
index 31fe1c3adec..aa82cede30a 100644
--- a/src/backend/access/nbtree/nbtinsert.c
+++ b/src/backend/access/nbtree/nbtinsert.c
@@ -827,7 +827,7 @@ _bt_findinsertloc(Relation rel,
 	opaque = BTPageGetOpaque(page);
 
 	/* Check 1/3 of a page restriction */
-	if (unlikely(insertstate->itemsz > BTMaxItemSize(page)))
+	if (unlikely(insertstate->itemsz > BTMaxItemSize))
 		_bt_check_third_page(rel, heapRel, itup_key->heapkeyspace, page,
 							 insertstate->itup);
 
diff --git a/src/backend/access/nbtree/nbtsort.c b/src/backend/access/nbtree/nbtsort.c
index 7aba852db90..fa336ba0096 100644
--- a/src/backend/access/nbtree/nbtsort.c
+++ b/src/backend/access/nbtree/nbtsort.c
@@ -829,7 +829,7 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, IndexTuple itup,
 	 * make use of the reserved space. This should never fail on internal
 	 * pages.
 	 */
-	if (unlikely(itupsz > BTMaxItemSize(npage)))
+	if (unlikely(itupsz > BTMaxItemSize))
 		_bt_check_third_page(wstate->index, wstate->heap, isleaf, npage,
 							 itup);
 
@@ -1305,7 +1305,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
 		 */
 		dstate->maxpostingsize = MAXALIGN_DOWN((BLCKSZ * 10 / 100)) -
 			sizeof(ItemIdData);
-		Assert(dstate->maxpostingsize <= BTMaxItemSize((Page) state->btps_buf) &&
+		Assert(dstate->maxpostingsize <= BTMaxItemSize &&
 			   dstate->maxpostingsize <= INDEX_SIZE_MASK);
 		dstate->htids = palloc(dstate->maxpostingsize);
 
diff --git a/src/backend/access/nbtree/nbtutils.c b/src/backend/access/nbtree/nbtutils.c
index 693e43c674b..efe58beaaad 100644
--- a/src/backend/access/nbtree/nbtutils.c
+++ b/src/backend/access/nbtree/nbtutils.c
@@ -3245,7 +3245,7 @@ _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
 	itemsz = MAXALIGN(IndexTupleSize(newtup));
 
 	/* Double check item size against limit */
-	if (itemsz <= BTMaxItemSize(page))
+	if (itemsz <= BTMaxItemSize)
 		return;
 
 	/*
@@ -3253,7 +3253,7 @@ _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
 	 * index uses version 2 or version 3, or that page is an internal page, in
 	 * which case a slightly higher limit applies.
 	 */
-	if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid(page))
+	if (!needheaptidspace && itemsz <= BTMaxItemSizeNoHeapTid)
 		return;
 
 	/*
@@ -3270,8 +3270,7 @@ _bt_check_third_page(Relation rel, Relation heap, bool needheaptidspace,
 			 errmsg("index row size %zu exceeds btree version %u maximum %zu for index \"%s\"",
 					itemsz,
 					needheaptidspace ? BTREE_VERSION : BTREE_NOVAC_VERSION,
-					needheaptidspace ? BTMaxItemSize(page) :
-					BTMaxItemSizeNoHeapTid(page),
+					needheaptidspace ? BTMaxItemSize : BTMaxItemSizeNoHeapTid,
 					RelationGetRelationName(rel)),
 			 errdetail("Index row references tuple (%u,%u) in relation \"%s\".",
 					   ItemPointerGetBlockNumber(BTreeTupleGetHeapTID(newtup)),
diff --git a/src/backend/access/nbtree/nbtxlog.c b/src/backend/access/nbtree/nbtxlog.c
index fadd0617955..d31dd56732d 100644
--- a/src/backend/access/nbtree/nbtxlog.c
+++ b/src/backend/access/nbtree/nbtxlog.c
@@ -483,7 +483,7 @@ btree_xlog_dedup(XLogReaderState *record)
 		state->deduplicate = true;	/* unused */
 		state->nmaxitems = 0;	/* unused */
 		/* Conservatively use larger maxpostingsize than primary */
-		state->maxpostingsize = BTMaxItemSize(page);
+		state->maxpostingsize = BTMaxItemSize;
 		state->base = NULL;
 		state->baseoff = InvalidOffsetNumber;
 		state->basetupsize = 0;
diff --git a/src/include/access/nbtree.h b/src/include/access/nbtree.h
index e4fdeca3402..0c43767f8c3 100644
--- a/src/include/access/nbtree.h
+++ b/src/include/access/nbtree.h
@@ -161,13 +161,13 @@ typedef struct BTMetaPageData
  * a heap index tuple to make space for a tiebreaker heap TID
  * attribute, which we account for here.
  */
-#define BTMaxItemSize(page) \
-	(MAXALIGN_DOWN((PageGetPageSize(page) - \
+#define BTMaxItemSize \
+	(MAXALIGN_DOWN((BLCKSZ - \
 					MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
 					MAXALIGN(sizeof(BTPageOpaqueData))) / 3) - \
 	 MAXALIGN(sizeof(ItemPointerData)))
-#define BTMaxItemSizeNoHeapTid(page) \
-	MAXALIGN_DOWN((PageGetPageSize(page) - \
+#define BTMaxItemSizeNoHeapTid \
+	MAXALIGN_DOWN((BLCKSZ - \
 				   MAXALIGN(SizeOfPageHeaderData + 3*sizeof(ItemIdData)) - \
 				   MAXALIGN(sizeof(BTPageOpaqueData))) / 3)
 
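
Illustrative note (not part of the patch): the commit message says that
BTMaxItemSize will let btestimateparallelscan size a DSM request without
having an nbtree page in hand. The minimal sketch below shows that kind of
sizing under stated assumptions; SketchSerializedKeysData and
sketch_estimate_skipscan_dsm are made-up placeholder names, not the real
nbtree parallel-scan structs or callbacks.

/*
 * Sketch only: reserve worst-case DSM space for nkeys serialized key
 * datums, relying on the fact that no index datum can exceed the 1/3 of
 * a page limit (BTMaxItemSize).  Placeholder names, not PostgreSQL APIs.
 */
#include "postgres.h"
#include "access/nbtree.h"

typedef struct SketchSerializedKeysData
{
	int			nkeys;			/* number of serialized key datums */
	char		data[FLEXIBLE_ARRAY_MEMBER];	/* datum bytes follow */
} SketchSerializedKeysData;

static Size
sketch_estimate_skipscan_dsm(int nkeys)
{
	/* worst case: every serialized datum is as large as BTMaxItemSize */
	return offsetof(SketchSerializedKeysData, data) +
		(Size) nkeys * MAXALIGN(BTMaxItemSize);
}

The point is simply that, after this patch, the limit is a compile-time
constant derived from BLCKSZ and fixed struct sizes, so it can be evaluated
in contexts like amestimateparallelscan where no buffer has been read yet.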