diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c
index 72807f40bf..5bf6f19e0e 100644
--- a/src/backend/access/heap/heapam.c
+++ b/src/backend/access/heap/heapam.c
@@ -6023,8 +6023,8 @@ heap_abort_speculative(Relation relation, ItemPointer tid)
  * transaction.  If compatible, return true with the buffer exclusive-locked,
  * and the caller must release that by calling
  * heap_inplace_update_and_unlock(), calling heap_inplace_unlock(), or raising
- * an error.  Otherwise, return false after blocking transactions, if any,
- * have ended.
+ * an error.  Otherwise, call release_callback(arg), wait for blocking
+ * transactions to end, and return false.
  *
  * Since this is intended for system catalogs and SERIALIZABLE doesn't cover
  * DDL, this doesn't guarantee any particular predicate locking.
@@ -6058,7 +6058,8 @@ heap_abort_speculative(Relation relation, ItemPointer tid)
  */
 bool
 heap_inplace_lock(Relation relation,
-				  HeapTuple oldtup_ptr, Buffer buffer)
+				  HeapTuple oldtup_ptr, Buffer buffer,
+				  void (*release_callback) (void *), void *arg)
 {
 	HeapTupleData oldtup = *oldtup_ptr;	/* minimize diff vs. heap_update() */
 	TM_Result	result;
@@ -6123,6 +6124,7 @@ heap_inplace_lock(Relation relation,
 										lockmode, NULL))
 			{
 				LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+				release_callback(arg);
 				ret = false;
 				MultiXactIdWait((MultiXactId) xwait, mxact_status, infomask,
 								relation, &oldtup.t_self, XLTW_Update,
@@ -6138,6 +6140,7 @@ heap_inplace_lock(Relation relation,
 		else
 		{
 			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+			release_callback(arg);
 			ret = false;
 			XactLockTableWait(xwait, relation, &oldtup.t_self,
 							  XLTW_Update);
@@ -6149,6 +6152,7 @@ heap_inplace_lock(Relation relation,
 		if (!ret)
 		{
 			LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+			release_callback(arg);
 		}
 	}
 
diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c
index e3b626b303..67246406cf 100644
--- a/src/backend/access/index/genam.c
+++ b/src/backend/access/index/genam.c
@@ -805,6 +805,7 @@ systable_inplace_update_begin(Relation relation,
 	int			retries = 0;
 	SysScanDesc scan;
 	HeapTuple	oldtup;
+	BufferHeapTupleTableSlot *bslot;
 
 	/*
 	 * For now, we don't allow parallel updates.  Unlike a regular update,
@@ -826,10 +827,9 @@ systable_inplace_update_begin(Relation relation,
 	Assert(IsInplaceUpdateRelation(relation) || !IsSystemRelation(relation));
 
 	/* Loop for an exclusive-locked buffer of a non-updated tuple. */
-	for (;;)
+	do
 	{
 		TupleTableSlot *slot;
-		BufferHeapTupleTableSlot *bslot;
 
 		CHECK_FOR_INTERRUPTS();
 
@@ -855,11 +855,9 @@ systable_inplace_update_begin(Relation relation,
 		slot = scan->slot;
 		Assert(TTS_IS_BUFFERTUPLE(slot));
 		bslot = (BufferHeapTupleTableSlot *) slot;
-		if (heap_inplace_lock(scan->heap_rel,
-							  bslot->base.tuple, bslot->buffer))
-			break;
-		systable_endscan(scan);
-	};
+	} while (!heap_inplace_lock(scan->heap_rel,
+								bslot->base.tuple, bslot->buffer,
+								(void (*) (void *)) systable_endscan, scan));
 
 	*oldtupcopy = heap_copytuple(oldtup);
 	*state = scan;
diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h
index a1d76a6285..edf8ec69b1 100644
--- a/src/include/access/heapam.h
+++ b/src/include/access/heapam.h
@@ -257,7 +257,8 @@
 extern TM_Result heap_lock_tuple(Relation relation, HeapTuple tuple,
 								 Buffer *buffer, struct TM_FailureData *tmfd);
 extern bool heap_inplace_lock(Relation relation,
-							  HeapTuple oldtup_ptr, Buffer buffer);
+							  HeapTuple oldtup_ptr, Buffer buffer,
+							  void (*release_callback) (void *), void *arg);
 extern void heap_inplace_update_and_unlock(Relation relation,
 										   HeapTuple oldtup, HeapTuple tuple,
 										   Buffer buffer);
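
For reference, here is a minimal sketch (not part of the patch) of how backend code consumes the systable_inplace_update_* wrappers whose retry loop the genam.c hunk restructures. With this patch, systable_inplace_update_begin() hands the scan itself to heap_inplace_lock() as release_callback/arg, so the scan is closed before any wait on a blocking transaction and the do/while simply rescans on a false return. The wrapper, catalog, and scan-key names below are the real PostgreSQL ones; bump_relpages_inplace() and its arguments are hypothetical, loosely modeled on index_update_stats().

/*
 * Illustrative caller of the in-place update wrappers; not part of this
 * patch.  Assumes pg_class_rel is pg_class, opened with a suitable lock.
 */
#include "postgres.h"

#include "access/genam.h"
#include "access/htup_details.h"
#include "access/skey.h"
#include "access/stratnum.h"
#include "catalog/pg_class.h"
#include "utils/fmgroids.h"
#include "utils/rel.h"

static void
bump_relpages_inplace(Relation pg_class_rel, Oid reloid, int32 num_pages)
{
	ScanKeyData key[1];
	HeapTuple	tuple;
	void	   *state;

	ScanKeyInit(&key[0],
				Anum_pg_class_oid,
				BTEqualStrategyNumber, F_OIDEQ,
				ObjectIdGetDatum(reloid));

	/*
	 * Internally this loops over heap_inplace_lock(); after this patch the
	 * scan is passed as release_callback/arg, so it is shut down before any
	 * wait on a concurrent updater, and the loop then starts a fresh scan.
	 */
	systable_inplace_update_begin(pg_class_rel, ClassOidIndexId, true,
								  NULL, 1, key, &tuple, &state);
	if (!HeapTupleIsValid(tuple))
		elog(ERROR, "could not find tuple for relation %u", reloid);

	/* "tuple" is a palloc'd copy; modify it and write it back in place */
	((Form_pg_class) GETSTRUCT(tuple))->relpages = num_pages;

	systable_inplace_update_finish(state, tuple);
	heap_freetuple(tuple);
}

On the true path of heap_inplace_lock(), the buffer stays exclusive-locked and release_callback is not called; the caller finishes via heap_inplace_update_and_unlock() or heap_inplace_unlock(). On the false path, the callback has already released the caller's resources before the wait, which is why the new do/while can restart cleanly.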