mirror of
https://git.postgresql.org/git/postgresql.git
synced 2024-12-21 08:29:39 +08:00
Only skip pages marked as clean in the visibility map if the last 32
pages were marked as clean as well. The idea is to avoid defeating OS readahead by skipping a page here and there; it also makes it less likely that we miss an opportunity to advance relfrozenxid for the sake of only a few skipped pages.
This commit is contained in:
parent
c079090bbc
commit
bf136cf6e3
@ -29,7 +29,7 @@
|
||||
*
|
||||
*
|
||||
* IDENTIFICATION
|
||||
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.117 2009/01/16 13:27:23 heikki Exp $
|
||||
* $PostgreSQL: pgsql/src/backend/commands/vacuumlazy.c,v 1.118 2009/01/22 19:25:00 heikki Exp $
|
||||
*
|
||||
*-------------------------------------------------------------------------
|
||||
*/
|
||||
@ -74,6 +74,12 @@
|
||||
*/
|
||||
#define LAZY_ALLOC_TUPLES MaxHeapTuplesPerPage
|
||||
|
||||
/*
|
||||
* Before we consider skipping a page that's marked as clean in
|
||||
* visibility map, we must've seen at least this many clean pages.
|
||||
*/
|
||||
#define SKIP_PAGES_THRESHOLD 32
|
||||
|
||||
typedef struct LVRelStats
|
||||
{
|
||||
/* hasindex = true means two-pass strategy; false means one-pass */
|
||||
@ -271,6 +277,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
|
||||
int i;
|
||||
PGRUsage ru0;
|
||||
Buffer vmbuffer = InvalidBuffer;
|
||||
BlockNumber all_visible_streak;
|
||||
|
||||
pg_rusage_init(&ru0);
|
||||
|
||||
@ -292,6 +299,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
|
||||
|
||||
lazy_space_alloc(vacrelstats, nblocks);
|
||||
|
||||
all_visible_streak = 0;
|
||||
for (blkno = 0; blkno < nblocks; blkno++)
|
||||
{
|
||||
Buffer buf;
|
||||
@ -309,7 +317,14 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
|
||||
|
||||
/*
|
||||
* Skip pages that don't require vacuuming according to the
|
||||
* visibility map.
|
||||
* visibility map. But only if we've seen a streak of at least
|
||||
* SKIP_PAGES_THRESHOLD pages marked as clean. Since we're reading
|
||||
* sequentially, the OS should be doing readahead for us and there's
|
||||
* no gain in skipping a page now and then. You need a longer run of
|
||||
* consecutive skipped pages before it's worthwhile. Also, skipping
|
||||
* even a single page means that we can't update relfrozenxid or
|
||||
* reltuples, so we only want to do it if there's a good chance to
|
||||
* skip a goodly number of pages.
|
||||
*/
|
||||
if (!scan_all)
|
||||
{
|
||||
@ -317,9 +332,15 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
|
||||
visibilitymap_test(onerel, blkno, &vmbuffer);
|
||||
if (all_visible_according_to_vm)
|
||||
{
|
||||
vacrelstats->scanned_all = false;
|
||||
continue;
|
||||
all_visible_streak++;
|
||||
if (all_visible_streak >= SKIP_PAGES_THRESHOLD)
|
||||
{
|
||||
vacrelstats->scanned_all = false;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
else
|
||||
all_visible_streak = 0;
|
||||
}
|
||||
|
||||
vacuum_delay_point();
|
||||
|
Loading…
Reference in New Issue
Block a user