/*-------------------------------------------------------------------------
 *
 * blinsert.c
 *		Bloom index build and insert functions.
 *
 * Copyright (c) 2016-2022, PostgreSQL Global Development Group
 *
 * IDENTIFICATION
 *	  contrib/bloom/blinsert.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/generic_xlog.h"
#include "access/tableam.h"
#include "bloom.h"
#include "catalog/index.h"
#include "miscadmin.h"
#include "storage/bufmgr.h"
#include "storage/indexfsm.h"
#include "storage/smgr.h"
#include "utils/memutils.h"
#include "utils/rel.h"

PG_MODULE_MAGIC;
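
/*
 * For orientation: these routines are reached through the index access
 * method API rather than called directly.  A typical SQL sequence that
 * exercises them (table and column names here are illustrative) is
 *
 *      CREATE EXTENSION bloom;
 *      CREATE INDEX bloomidx ON tbloom USING bloom (i1, i2) WITH (length = 80);
 *
 * CREATE INDEX arrives at blbuild(); each later INSERT arrives at blinsert().
 */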

/*
 * State of bloom index build.  We accumulate one page data here before
 * flushing it to buffer manager.
 */
typedef struct
{
    BloomState  blstate;        /* bloom index state */
    int64       indtuples;      /* total number of tuples indexed */
    MemoryContext tmpCtx;       /* temporary memory context reset after each
                                 * tuple */
    PGAlignedBlock data;        /* cached page */
    int         count;          /* number of tuples in cached page */
} BloomBuildState;

/*
 * Flush page cached in BloomBuildState.
 */
static void
flushCachedPage(Relation index, BloomBuildState *buildstate)
{
    Page        page;
    Buffer      buffer = BloomNewBuffer(index);
    GenericXLogState *state;

    state = GenericXLogStart(index);
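    /*
     * The buffer holds a brand-new page that is about to be written in full,
     * so ask generic WAL for a complete page image instead of a delta.
     */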
    page = GenericXLogRegisterBuffer(state, buffer, GENERIC_XLOG_FULL_IMAGE);
    memcpy(page, buildstate->data.data, BLCKSZ);
    GenericXLogFinish(state);
    UnlockReleaseBuffer(buffer);
}

/*
 * (Re)initialize cached page in BloomBuildState.
 */
static void
initCachedPage(BloomBuildState *buildstate)
{
    BloomInitPage(buildstate->data.data, 0);
    buildstate->count = 0;
}

/*
 * Per-tuple callback for table_index_build_scan.
 */
static void
bloomBuildCallback(Relation index, ItemPointer tid, Datum *values,
                   bool *isnull, bool tupleIsAlive, void *state)
{
    BloomBuildState *buildstate = (BloomBuildState *) state;
    MemoryContext oldCtx;
    BloomTuple *itup;

    oldCtx = MemoryContextSwitchTo(buildstate->tmpCtx);

    itup = BloomFormTuple(&buildstate->blstate, tid, values, isnull);

    /* Try to add next item to cached page */
    if (BloomPageAddItem(&buildstate->blstate, buildstate->data.data, itup))
    {
        /* Next item was added successfully */
        buildstate->count++;
    }
    else
    {
        /* Cached page is full, flush it out and make a new one */
        flushCachedPage(index, buildstate);

        CHECK_FOR_INTERRUPTS();

        initCachedPage(buildstate);

        if (!BloomPageAddItem(&buildstate->blstate, buildstate->data.data, itup))
        {
            /* We shouldn't be here since we're inserting to the empty page */
            elog(ERROR, "could not add new bloom tuple to empty page");
        }

        /* Next item was added successfully */
        buildstate->count++;
    }

    /* Update total tuple count */
    buildstate->indtuples += 1;
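
    /* Switch back, then free everything allocated for this tuple (itup included) */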
    MemoryContextSwitchTo(oldCtx);
    MemoryContextReset(buildstate->tmpCtx);
}

/*
 * Build a new bloom index.
 */
IndexBuildResult *
blbuild(Relation heap, Relation index, IndexInfo *indexInfo)
{
    IndexBuildResult *result;
    double      reltuples;
    BloomBuildState buildstate;

    if (RelationGetNumberOfBlocks(index) != 0)
        elog(ERROR, "index \"%s\" already contains data",
             RelationGetRelationName(index));

    /* Initialize the meta page */
    BloomInitMetapage(index);

    /* Initialize the bloom build state */
    memset(&buildstate, 0, sizeof(buildstate));
    initBloomState(&buildstate.blstate, index);
    buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
                                              "Bloom build temporary context",
                                              ALLOCSET_DEFAULT_SIZES);
    initCachedPage(&buildstate);

    /* Do the heap scan */
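    /* The two "true" arguments enable synchronized scanning and progress reporting */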
    reltuples = table_index_build_scan(heap, index, indexInfo, true, true,
                                       bloomBuildCallback, (void *) &buildstate,
                                       NULL);

    /* Flush last page if needed (it will be, unless heap was empty) */
    if (buildstate.count > 0)
        flushCachedPage(index, &buildstate);

    MemoryContextDelete(buildstate.tmpCtx);

    result = (IndexBuildResult *) palloc(sizeof(IndexBuildResult));
    result->heap_tuples = reltuples;
    result->index_tuples = buildstate.indtuples;

    return result;
}

/*
 * Build an empty bloom index in the initialization fork.
 */
void
blbuildempty(Relation index)
{
    Page        metapage;

    /* Construct metapage. */
    metapage = (Page) palloc(BLCKSZ);
    BloomFillMetapage(index, metapage);

    /*
     * Write the page and log it.  It might seem that an immediate sync would
     * be sufficient to guarantee that the file exists on disk, but recovery
     * itself might remove it while replaying, for example, an
     * XLOG_DBASE_CREATE* or XLOG_TBLSPC_CREATE record.  Therefore, we need
     * this even when wal_level=minimal.
     */
    PageSetChecksumInplace(metapage, BLOOM_METAPAGE_BLKNO);
    smgrwrite(RelationGetSmgr(index), INIT_FORKNUM, BLOOM_METAPAGE_BLKNO,
              (char *) metapage, true);
    log_newpage(&(RelationGetSmgr(index))->smgr_rlocator.locator, INIT_FORKNUM,
                BLOOM_METAPAGE_BLKNO, metapage, true);

    /*
     * An immediate sync is required even if we xlog'd the page, because the
     * write did not go through shared_buffers and therefore a concurrent
     * checkpoint may have moved the redo pointer past our xlog record.
     */
    smgrimmedsync(RelationGetSmgr(index), INIT_FORKNUM);
}

/*
 * Insert new tuple to the bloom index.
 */
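/*
 * The boolean result of an aminsert call matters only for deferred
 * uniqueness checks; bloom does not support unique indexes, so every
 * return path below simply reports false.
 */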
bool
blinsert(Relation index, Datum *values, bool *isnull,
         ItemPointer ht_ctid, Relation heapRel,
         IndexUniqueCheck checkUnique,
         bool indexUnchanged,
         IndexInfo *indexInfo)
{
    BloomState  blstate;
    BloomTuple *itup;
    MemoryContext oldCtx;
    MemoryContext insertCtx;
    BloomMetaPageData *metaData;
    Buffer      buffer,
                metaBuffer;
    Page        page,
                metaPage;
    BlockNumber blkno = InvalidBlockNumber;
    OffsetNumber nStart;
    GenericXLogState *state;
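
    /*
     * The overall plan: first try the page named by the first notFullPage
     * entry while holding only a shared lock on the metapage; if that fails,
     * retry the remaining entries under an exclusive metapage lock; as a
     * last resort, allocate a brand-new page and reset the array to it.
     */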
    insertCtx = AllocSetContextCreate(CurrentMemoryContext,
                                      "Bloom insert temporary context",
                                      ALLOCSET_DEFAULT_SIZES);

    oldCtx = MemoryContextSwitchTo(insertCtx);

    initBloomState(&blstate, index);
    itup = BloomFormTuple(&blstate, ht_ctid, values, isnull);

    /*
     * At first, try to insert new tuple to the first page in notFullPage
     * array.  If successful, we don't need to modify the meta page.
     */
    metaBuffer = ReadBuffer(index, BLOOM_METAPAGE_BLKNO);
    LockBuffer(metaBuffer, BUFFER_LOCK_SHARE);
    metaData = BloomPageGetMeta(BufferGetPage(metaBuffer));

    if (metaData->nEnd > metaData->nStart)
    {
        blkno = metaData->notFullPage[metaData->nStart];
        Assert(blkno != InvalidBlockNumber);

        /* Don't hold metabuffer lock while doing insert */
        LockBuffer(metaBuffer, BUFFER_LOCK_UNLOCK);

        buffer = ReadBuffer(index, blkno);
        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);

        state = GenericXLogStart(index);
        page = GenericXLogRegisterBuffer(state, buffer, 0);

        /*
         * We might have found a page that was recently deleted by VACUUM.  If
         * so, we can reuse it, but we must reinitialize it.
         */
        if (PageIsNew(page) || BloomPageIsDeleted(page))
            BloomInitPage(page, 0);

        if (BloomPageAddItem(&blstate, page, itup))
        {
            /* Success!  Apply the change, clean up, and exit */
            GenericXLogFinish(state);
            UnlockReleaseBuffer(buffer);
            ReleaseBuffer(metaBuffer);
            MemoryContextSwitchTo(oldCtx);
            MemoryContextDelete(insertCtx);
            return false;
        }

        /* Didn't fit, must try other pages */
        GenericXLogAbort(state);
        UnlockReleaseBuffer(buffer);
    }
    else
    {
        /* No entries in notFullPage */
        LockBuffer(metaBuffer, BUFFER_LOCK_UNLOCK);
    }

    /*
     * Try other pages in notFullPage array.  We will have to change nStart in
     * metapage.  Thus, grab exclusive lock on metapage.
     */
    LockBuffer(metaBuffer, BUFFER_LOCK_EXCLUSIVE);

    /* nStart might have changed while we didn't have lock */
    nStart = metaData->nStart;

    /* Skip first page if we already tried it above */
    if (nStart < metaData->nEnd &&
        blkno == metaData->notFullPage[nStart])
        nStart++;

    /*
     * This loop iterates for each page we try from the notFullPage array, and
     * will also initialize a GenericXLogState for the fallback case of having
     * to allocate a new page.
     */
    for (;;)
    {
        state = GenericXLogStart(index);

        /* get modifiable copy of metapage */
        metaPage = GenericXLogRegisterBuffer(state, metaBuffer, 0);
        metaData = BloomPageGetMeta(metaPage);

        if (nStart >= metaData->nEnd)
            break;              /* no more entries in notFullPage array */

        blkno = metaData->notFullPage[nStart];
        Assert(blkno != InvalidBlockNumber);

        buffer = ReadBuffer(index, blkno);
        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
        page = GenericXLogRegisterBuffer(state, buffer, 0);

        /* Basically same logic as above */
        if (PageIsNew(page) || BloomPageIsDeleted(page))
            BloomInitPage(page, 0);

        if (BloomPageAddItem(&blstate, page, itup))
        {
            /* Success!  Apply the changes, clean up, and exit */
            metaData->nStart = nStart;
            GenericXLogFinish(state);
            UnlockReleaseBuffer(buffer);
            UnlockReleaseBuffer(metaBuffer);
            MemoryContextSwitchTo(oldCtx);
            MemoryContextDelete(insertCtx);
            return false;
        }

        /* Didn't fit, must try other pages */
        GenericXLogAbort(state);
        UnlockReleaseBuffer(buffer);
        nStart++;
    }

    /*
     * Didn't find place to insert in notFullPage array.  Allocate new page.
     * (XXX is it good to do this while holding ex-lock on the metapage??)
     */
    buffer = BloomNewBuffer(index);

    page = GenericXLogRegisterBuffer(state, buffer, GENERIC_XLOG_FULL_IMAGE);
    BloomInitPage(page, 0);

    if (!BloomPageAddItem(&blstate, page, itup))
    {
        /* We shouldn't be here since we're inserting to an empty page */
        elog(ERROR, "could not add new bloom tuple to empty page");
    }

    /* Reset notFullPage array to contain just this new page */
    metaData->nStart = 0;
    metaData->nEnd = 1;
    metaData->notFullPage[0] = BufferGetBlockNumber(buffer);

    /* Apply the changes, clean up, and exit */
    GenericXLogFinish(state);

    UnlockReleaseBuffer(buffer);
    UnlockReleaseBuffer(metaBuffer);

    MemoryContextSwitchTo(oldCtx);
    MemoryContextDelete(insertCtx);

    return false;
}