diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 0b3332ecf5d..22b3f5fe08b 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -6377,7 +6377,9 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, * are older than the specified cutoff XID and cutoff MultiXactId. If so, * setup enough state (in the *frz output argument) to later execute and * WAL-log what we would need to do, and return TRUE. Return FALSE if nothing - * is to be changed. + * is to be changed. In addition, set *totally_frozen_p to true if the tuple + * will be totally frozen after these operations are performed and false if + * more freezing will eventually be required. * * Caller is responsible for setting the offset field, if appropriate. * @@ -6402,12 +6404,12 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask, bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi, - xl_heap_freeze_tuple *frz) - + xl_heap_freeze_tuple *frz, bool *totally_frozen_p) { bool changed = false; bool freeze_xmax = false; TransactionId xid; + bool totally_frozen = true; frz->frzflags = 0; frz->t_infomask2 = tuple->t_infomask2; @@ -6416,11 +6418,15 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, /* Process xmin */ xid = HeapTupleHeaderGetXmin(tuple); - if (TransactionIdIsNormal(xid) && - TransactionIdPrecedes(xid, cutoff_xid)) + if (TransactionIdIsNormal(xid)) { - frz->t_infomask |= HEAP_XMIN_FROZEN; - changed = true; + if (TransactionIdPrecedes(xid, cutoff_xid)) + { + frz->t_infomask |= HEAP_XMIN_FROZEN; + changed = true; + } + else + totally_frozen = false; } /* @@ -6458,6 +6464,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, if (flags & FRM_MARK_COMMITTED) frz->t_infomask &= HEAP_XMAX_COMMITTED; changed = true; + totally_frozen = false; } else if (flags & FRM_RETURN_IS_MULTI) { @@ -6479,16 +6486,19 @@ 
heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, frz->xmax = newxmax; changed = true; + totally_frozen = false; } else { Assert(flags & FRM_NOOP); } } - else if (TransactionIdIsNormal(xid) && - TransactionIdPrecedes(xid, cutoff_xid)) + else if (TransactionIdIsNormal(xid)) { - freeze_xmax = true; + if (TransactionIdPrecedes(xid, cutoff_xid)) + freeze_xmax = true; + else + totally_frozen = false; } if (freeze_xmax) @@ -6514,8 +6524,15 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, if (tuple->t_infomask & HEAP_MOVED) { xid = HeapTupleHeaderGetXvac(tuple); - if (TransactionIdIsNormal(xid) && - TransactionIdPrecedes(xid, cutoff_xid)) + /* + * For Xvac, we ignore the cutoff_xid and just always perform the + * freeze operation. The oldest release in which such a value can + * actually be set is PostgreSQL 8.4, because old-style VACUUM FULL + * was removed in PostgreSQL 9.0. Note that if we were to respect + * cutoff_xid here, we'd need to make sure to clear totally_frozen + * when we skipped freezing on that basis. 
+ */ + if (TransactionIdIsNormal(xid)) { /* * If a MOVED_OFF tuple is not dead, the xvac transaction must @@ -6537,6 +6554,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, } } + *totally_frozen_p = totally_frozen; return changed; } @@ -6587,9 +6605,10 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, { xl_heap_freeze_tuple frz; bool do_freeze; + bool tuple_totally_frozen; do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi, - &frz); + &frz, &tuple_totally_frozen); /* * Note that because this is not a WAL-logged operation, we don't need to diff --git a/src/backend/commands/vacuumlazy.c b/src/backend/commands/vacuumlazy.c index 0010ca9a801..cb5777ffdf9 100644 --- a/src/backend/commands/vacuumlazy.c +++ b/src/backend/commands/vacuumlazy.c @@ -1054,6 +1054,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, } else { + bool tuple_totally_frozen; + num_tuples += 1; hastup = true; @@ -1062,9 +1064,11 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats, * freezing. Note we already have exclusive buffer lock. 
*/ if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit, - MultiXactCutoff, &frozen[nfrozen])) + MultiXactCutoff, &frozen[nfrozen], + &tuple_totally_frozen)) frozen[nfrozen++].offset = offnum; - else if (heap_tuple_needs_eventual_freeze(tuple.t_data)) + + if (!tuple_totally_frozen) all_frozen = false; } } /* scan along page */ diff --git a/src/include/access/heapam_xlog.h b/src/include/access/heapam_xlog.h index ad30217cfbf..a822d0b82dc 100644 --- a/src/include/access/heapam_xlog.h +++ b/src/include/access/heapam_xlog.h @@ -386,7 +386,8 @@ extern XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer, extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, TransactionId cutoff_multi, - xl_heap_freeze_tuple *frz); + xl_heap_freeze_tuple *frz, + bool *totally_frozen); extern void heap_execute_freeze_tuple(HeapTupleHeader tuple, xl_heap_freeze_tuple *xlrec_tp); extern XLogRecPtr log_heap_visible(RelFileNode rnode, Buffer heap_buffer,