Fix inplace update buffer self-deadlock.
A CacheInvalidateHeapTuple* callee might call
CatalogCacheInitializeCache(), which needs a relcache entry. Acquiring
a valid relcache entry might scan pg_class. Hence, to prevent
undetected LWLock self-deadlock, CacheInvalidateHeapTuple* callers must
not hold BUFFER_LOCK_EXCLUSIVE on buffers of pg_class. Move the
CacheInvalidateHeapTupleInplace() before the BUFFER_LOCK_EXCLUSIVE. No
back-patch, since I've reverted commit 243e9b40f1 from non-master
branches.
Reported by Alexander Lakhin. Reviewed by Alexander Lakhin.
Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com
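
To make the ordering rule concrete, here is a small stand-alone C analogy, not
PostgreSQL code: a POSIX error-checking mutex stands in for the buffer's
LWLock, and the invented build_cache_inval() stands in for a
CacheInvalidateHeapTupleInplace() call that reaches
CatalogCacheInitializeCache() and tries to lock the same buffer.  An LWLock
holder that re-acquires its own lock hangs undetected; the errorcheck mutex at
least returns EDEADLK, which is enough to show why the inval must be
constructed before LockBuffer().

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t buffer_lock;    /* stand-in for the buffer's content lock */

    /* Stand-in for a callee that may need to lock the same buffer again. */
    static void
    build_cache_inval(void)
    {
        int rc = pthread_mutex_lock(&buffer_lock);

        if (rc == EDEADLK)
            printf("would self-deadlock: this thread already holds the lock\n");
        else if (rc == 0)
        {
            printf("callee took and released the lock safely\n");
            pthread_mutex_unlock(&buffer_lock);
        }
    }

    int
    main(void)
    {
        pthread_mutexattr_t attr;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&buffer_lock, &attr);

        /* Safe ordering, as in the fix: construct the inval before locking. */
        build_cache_inval();
        pthread_mutex_lock(&buffer_lock);
        pthread_mutex_unlock(&buffer_lock);

        /* Unsafe ordering: callee re-locks a lock this thread already holds. */
        pthread_mutex_lock(&buffer_lock);
        build_cache_inval();
        pthread_mutex_unlock(&buffer_lock);

        return 0;
    }

Compile with "cc -pthread"; only the first, reordered call sequence is safe.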
parent b412f402d1
commit 0bada39c83
src/backend/access/heap/heapam.c
@@ -6214,6 +6214,17 @@ heap_inplace_lock(Relation relation,
 	Assert(BufferIsValid(buffer));
 
+	/*
+	 * Construct shared cache inval if necessary.  Because we pass a tuple
+	 * version without our own inplace changes or inplace changes other
+	 * sessions complete while we wait for locks, inplace update mustn't
+	 * change catcache lookup keys.  But we aren't bothering with index
+	 * updates either, so that's true a fortiori.  After LockBuffer(), it
+	 * would be too late, because this might reach a
+	 * CatalogCacheInitializeCache() that locks "buffer".
+	 */
+	CacheInvalidateHeapTupleInplace(relation, oldtup_ptr, NULL);
+
 	LockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
 	LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
@@ -6309,6 +6320,7 @@ heap_inplace_lock(Relation relation,
 	if (!ret)
 	{
 		UnlockTuple(relation, &oldtup.t_self, InplaceUpdateTupleLock);
+		ForgetInplace_Inval();
 		InvalidateCatalogSnapshot();
 	}
 	return ret;
@@ -6345,14 +6357,6 @@ heap_inplace_update_and_unlock(Relation relation,
 	dst = (char *) htup + htup->t_hoff;
 	src = (char *) tuple->t_data + tuple->t_data->t_hoff;
 
-	/*
-	 * Construct shared cache inval if necessary.  Note that because we only
-	 * pass the new version of the tuple, this mustn't be used for any
-	 * operations that could change catcache lookup keys.  But we aren't
-	 * bothering with index updates either, so that's true a fortiori.
-	 */
-	CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
-
 	/* Like RecordTransactionCommit(), log only if needed */
 	if (XLogStandbyInfoActive())
 		nmsgs = inplaceGetInvalidationMessages(&invalMessages,
@@ -6481,6 +6485,7 @@ heap_inplace_unlock(Relation relation,
 {
 	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
 	UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
+	ForgetInplace_Inval();
 }
 
 #define FRM_NOOP		0x0001
src/backend/utils/cache/inval.c
@@ -1201,6 +1201,18 @@ AtInplace_Inval(void)
 	inplaceInvalInfo = NULL;
 }
 
+/*
+ * ForgetInplace_Inval
+ *		Alternative to PreInplace_Inval()+AtInplace_Inval(): discard queued-up
+ *		invalidations.  This lets inplace update enumerate invalidations
+ *		optimistically, before locking the buffer.
+ */
+void
+ForgetInplace_Inval(void)
+{
+	inplaceInvalInfo = NULL;
+}
+
 /*
  * AtEOSubXact_Inval
  *		Process queued-up invalidation messages at end of subtransaction.
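
For illustration only, the following toy program sketches the calling pattern
this function enables.  Every name in it is an invented stand-in
(queue_inplace_invals() for CacheInvalidateHeapTupleInplace(),
forget_inplace_invals() for ForgetInplace_Inval(), try_lock_for_inplace_update()
for the locking done in heap_inplace_lock()), and the retry loop is an
assumption about how a caller reacts when locking fails:

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented stand-ins; a counter plays the role of the queued inval list. */
    static int queued_invals = 0;

    static void queue_inplace_invals(void)  { queued_invals++; }
    static void forget_inplace_invals(void) { queued_invals = 0; }

    static bool
    try_lock_for_inplace_update(int attempt)
    {
        /* Pretend the first attempt loses a race and must start over. */
        return attempt > 0;
    }

    int
    main(void)
    {
        for (int attempt = 0;; attempt++)
        {
            /* Enumerate invalidations optimistically, before any locking. */
            queue_inplace_invals();

            if (try_lock_for_inplace_update(attempt))
                break;          /* success: the queued invals stay pending */

            /* Failure path: discard what was queued, then retry from scratch. */
            forget_inplace_invals();
        }

        printf("proceeding with %d queued invalidation(s)\n", queued_invals);
        return 0;
    }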
src/include/utils/inval.h
@@ -30,6 +30,7 @@ extern void AtEOXact_Inval(bool isCommit);
 
 extern void PreInplace_Inval(void);
 extern void AtInplace_Inval(void);
+extern void ForgetInplace_Inval(void);
 
 extern void AtEOSubXact_Inval(bool isCommit);