mirror of
https://git.postgresql.org/git/postgresql.git
synced 2025-01-06 15:24:56 +08:00
Ensure we MarkBufferDirty before visibilitymap_set() logs the heap page and
sets the LSN. Otherwise a checkpoint could occur between those two actions
and leave us in an inconsistent state.

Jeff Davis
This commit is contained in:
parent
fdea2530bd
commit
730924397c
@@ -894,26 +894,25 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,

 		freespace = PageGetHeapFreeSpace(page);

 		/* mark page all-visible, if appropriate */
-		if (all_visible)
+		if (all_visible && !all_visible_according_to_vm)
 		{
-			if (!PageIsAllVisible(page))
-			{
-				PageSetAllVisible(page);
-				MarkBufferDirty(buf);
-				visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
-								  vmbuffer, visibility_cutoff_xid);
-			}
-			else if (!all_visible_according_to_vm)
-			{
-				/*
-				 * It should never be the case that the visibility map page is
-				 * set while the page-level bit is clear, but the reverse is
-				 * allowed.  Set the visibility map bit as well so that we get
-				 * back in sync.
-				 */
-				visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
-								  vmbuffer, visibility_cutoff_xid);
-			}
+			/*
+			 * It should never be the case that the visibility map page is set
+			 * while the page-level bit is clear, but the reverse is allowed
+			 * (if checksums are not enabled).  Regardless, set the both bits
+			 * so that we get back in sync.
+			 *
+			 * NB: If the heap page is all-visible but the VM bit is not set,
+			 * we don't need to dirty the heap page.  However, if checksums are
+			 * enabled, we do need to make sure that the heap page is dirtied
+			 * before passing it to visibilitymap_set(), because it may be
+			 * logged.  Given that this situation should only happen in rare
+			 * cases after a crash, it is not worth optimizing.
+			 */
+			PageSetAllVisible(page);
+			MarkBufferDirty(buf);
+			visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
+							  vmbuffer, visibility_cutoff_xid);
 		}

 		/*
@@ -1138,6 +1137,14 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,

 	PageRepairFragmentation(page);

+	/*
+	 * Mark buffer dirty before we write WAL.
+	 *
+	 * If checksums are enabled, visibilitymap_set() may log the heap page, so
+	 * we must mark heap buffer dirty before calling visibilitymap_set().
+	 */
+	MarkBufferDirty(buffer);
+
 	/*
 	 * Now that we have removed the dead tuples from the page, once again check
 	 * if the page has become all-visible.
@@ -1151,8 +1158,6 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
 								  visibility_cutoff_xid);
 	}

-	MarkBufferDirty(buffer);
-
 	/* XLOG stuff */
 	if (RelationNeedsWAL(onerel))
 	{
Loading…
Reference in New Issue
Block a user