postgresql/contrib/bloom/blscan.c

/*-------------------------------------------------------------------------
 *
 * blscan.c
 *		Bloom index scan functions.
 *
 * Copyright (c) 2016-2019, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *		contrib/bloom/blscan.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/relscan.h"
#include "pgstat.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/lmgr.h"
#include "utils/memutils.h"
#include "utils/rel.h"

#include "bloom.h"

/*
 * Begin scan of bloom index.
 */
IndexScanDesc
blbeginscan(Relation r, int nkeys, int norderbys)
{
    IndexScanDesc scan;
    BloomScanOpaque so;

    scan = RelationGetIndexScan(r, nkeys, norderbys);

    so = (BloomScanOpaque) palloc(sizeof(BloomScanOpaqueData));
    initBloomState(&so->state, scan->indexRelation);
    so->sign = NULL;

    scan->opaque = so;

    return scan;
}

/*
 * Rescan a bloom index.
 */
void
blrescan(IndexScanDesc scan, ScanKey scankey, int nscankeys,
         ScanKey orderbys, int norderbys)
{
    BloomScanOpaque so = (BloomScanOpaque) scan->opaque;
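
    /*
     * Forget any previously computed search signature; blgetbitmap() will
     * rebuild it from the new scan keys on the next call.
     */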
    if (so->sign)
        pfree(so->sign);
    so->sign = NULL;

    if (scankey && scan->numberOfKeys > 0)
    {
        memmove(scan->keyData, scankey,
                scan->numberOfKeys * sizeof(ScanKeyData));
    }
}

/*
 * End scan of bloom index.
 */
void
blendscan(IndexScanDesc scan)
{
    BloomScanOpaque so = (BloomScanOpaque) scan->opaque;

    if (so->sign)
        pfree(so->sign);
    so->sign = NULL;
}

/*
 * Insert all matching tuples into a bitmap.
 */
int64
blgetbitmap(IndexScanDesc scan, TIDBitmap *tbm)
{
    int64       ntids = 0;
    BlockNumber blkno = BLOOM_HEAD_BLKNO,
                npages;
    int         i;
    BufferAccessStrategy bas;
    BloomScanOpaque so = (BloomScanOpaque) scan->opaque;

    if (so->sign == NULL)
    {
        /* New search: have to calculate search signature */
        ScanKey     skey = scan->keyData;

        so->sign = palloc0(sizeof(BloomSignatureWord) * so->state.opts.bloomLength);
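
        /*
         * The search signature starts out all-zeroes; signValue() below sets
         * the bits corresponding to each scan key's hashed value.
         */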
        for (i = 0; i < scan->numberOfKeys; i++)
        {
            /*
             * Assume bloom-indexable operators to be strict, so nothing could
             * be found for NULL key.
             */
            if (skey->sk_flags & SK_ISNULL)
            {
                pfree(so->sign);
                so->sign = NULL;
                return 0;
            }

            /* Add next value to the signature */
            signValue(&so->state, so->sign, skey->sk_argument,
                      skey->sk_attno - 1);

            skey++;
        }
    }

    /*
     * We're going to read the whole index sequentially, so use a bulk-read
     * buffer access strategy to keep the scan from evicting useful pages
     * from shared buffers.
     */
    bas = GetAccessStrategy(BAS_BULKREAD);
    npages = RelationGetNumberOfBlocks(scan->indexRelation);

    for (blkno = BLOOM_HEAD_BLKNO; blkno < npages; blkno++)
    {
        Buffer      buffer;
        Page        page;

        buffer = ReadBufferExtended(scan->indexRelation, MAIN_FORKNUM,
                                    blkno, RBM_NORMAL, bas);
        LockBuffer(buffer, BUFFER_LOCK_SHARE);
        page = BufferGetPage(buffer);
        TestForOldSnapshot(scan->xs_snapshot, scan->indexRelation, page);
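
        /*
         * Brand-new pages and pages marked deleted by VACUUM contain no live
         * index tuples, so skip them.
         */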
        if (!PageIsNew(page) && !BloomPageIsDeleted(page))
        {
            OffsetNumber offset,
                        maxOffset = BloomPageGetMaxOffset(page);

            for (offset = 1; offset <= maxOffset; offset++)
            {
                BloomTuple *itup = BloomPageGetTuple(&so->state, page, offset);
                bool        res = true;

                /*
                 * Check index signature against scan signature: the tuple
                 * can only match if every bit set in the scan signature is
                 * also set in the tuple's signature.
                 */
                for (i = 0; i < so->state.opts.bloomLength; i++)
                {
                    if ((itup->sign[i] & so->sign[i]) != so->sign[i])
                    {
                        res = false;
                        break;
                    }
                }

                /*
                 * Add matching tuples to the bitmap.  A signature match can
                 * be a false positive, so pass recheck = true; the heap
                 * tuples will be re-verified against the scan quals.
                 */
                if (res)
                {
                    tbm_add_tuples(tbm, &itup->heapPtr, 1, true);
                    ntids++;
                }
            }
        }
        UnlockReleaseBuffer(buffer);
        CHECK_FOR_INTERRUPTS();
    }
    FreeAccessStrategy(bas);

    return ntids;
}