Change executor to just Assert that table locks were already obtained.

Instead of locking tables during executor startup, just Assert that
suitable locks were obtained already during the parse/plan pipeline
(or re-obtained by the plan cache).  This must be so, else we have a
hazard that concurrent DDL has invalidated the plan.

This is pretty inefficient as well as undercommented, but it's all going
to go away shortly, so I didn't try hard.  This commit is just another
attempt to use the buildfarm to see if we've missed anything in the plan
to simplify the executor's table management.

Note that the change needed here in relation_open() exposes that
parallel workers now really are accessing tables without holding any
lock of their own, whereas they were not doing that before this commit.
This does not give me a warm fuzzy feeling about that aspect of parallel
query; it does not seem like a good design, and we now know that it's
had exactly no actual testing.  I think that we should modify parallel
query so that this change can be reverted.

Discussion: https://postgr.es/m/468c85d9-540e-66a2-1dde-fec2b741e688@lab.ntt.co.jp
This commit is contained in:
Tom Lane 2018-10-03 16:05:05 -04:00
parent c03c1449c0
commit 9a3cebeaa7
3 changed files with 28 additions and 15 deletions

View File

@ -1140,9 +1140,13 @@ relation_open(Oid relationId, LOCKMODE lockmode)
/*
* If we didn't get the lock ourselves, assert that caller holds one,
* except in bootstrap mode where no locks are used.
*
* Also, parallel workers currently assume that their parent holds locks
* for tables used in the parallel query (a mighty shaky assumption).
*/
Assert(lockmode != NoLock ||
IsBootstrapProcessingMode() ||
IsParallelWorker() ||
CheckRelationLockedByMe(r, AccessShareLock, true));
/* Make note that we've accessed a temporary relation */

View File

@ -849,8 +849,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
Relation resultRelation;
resultRelationOid = getrelid(resultRelationIndex, rangeTable);
Assert(rt_fetch(resultRelationIndex, rangeTable)->rellockmode == RowExclusiveLock);
resultRelation = heap_open(resultRelationOid, RowExclusiveLock);
resultRelation = heap_open(resultRelationOid, NoLock);
Assert(CheckRelationLockedByMe(resultRelation, RowExclusiveLock, true));
InitResultRelInfo(resultRelInfo,
resultRelation,
@ -890,8 +890,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
Relation resultRelDesc;
resultRelOid = getrelid(resultRelIndex, rangeTable);
Assert(rt_fetch(resultRelIndex, rangeTable)->rellockmode == RowExclusiveLock);
resultRelDesc = heap_open(resultRelOid, RowExclusiveLock);
resultRelDesc = heap_open(resultRelOid, NoLock);
Assert(CheckRelationLockedByMe(resultRelDesc, RowExclusiveLock, true));
InitResultRelInfo(resultRelInfo,
resultRelDesc,
lfirst_int(l),
@ -903,7 +903,8 @@ InitPlan(QueryDesc *queryDesc, int eflags)
estate->es_root_result_relations = resultRelInfos;
estate->es_num_root_result_relations = num_roots;
/* Simply lock the rest of them. */
/* Simply check the rest of them are locked. */
#ifdef USE_ASSERT_CHECKING
foreach(l, plannedstmt->nonleafResultRelations)
{
Index resultRelIndex = lfirst_int(l);
@ -912,11 +913,15 @@ InitPlan(QueryDesc *queryDesc, int eflags)
if (!list_member_int(plannedstmt->rootResultRelations,
resultRelIndex))
{
Assert(rt_fetch(resultRelIndex, rangeTable)->rellockmode == RowExclusiveLock);
LockRelationOid(getrelid(resultRelIndex, rangeTable),
RowExclusiveLock);
Relation resultRelDesc;
resultRelDesc = heap_open(getrelid(resultRelIndex, rangeTable),
NoLock);
Assert(CheckRelationLockedByMe(resultRelDesc, RowExclusiveLock, true));
heap_close(resultRelDesc, NoLock);
}
}
#endif
}
}
else
@ -945,7 +950,6 @@ InitPlan(QueryDesc *queryDesc, int eflags)
{
PlanRowMark *rc = (PlanRowMark *) lfirst(l);
Oid relid;
LOCKMODE rellockmode;
Relation relation;
ExecRowMark *erm;
@ -963,8 +967,10 @@ InitPlan(QueryDesc *queryDesc, int eflags)
case ROW_MARK_SHARE:
case ROW_MARK_KEYSHARE:
case ROW_MARK_REFERENCE:
rellockmode = rt_fetch(rc->rti, rangeTable)->rellockmode;
relation = heap_open(relid, rellockmode);
relation = heap_open(relid, NoLock);
Assert(CheckRelationLockedByMe(relation,
rt_fetch(rc->rti, rangeTable)->rellockmode,
true));
break;
case ROW_MARK_COPY:
/* no physical table access is required */

View File

@ -42,6 +42,7 @@
#include "postgres.h"
#include "access/parallel.h"
#include "access/relscan.h"
#include "access/transam.h"
#include "executor/executor.h"
@ -648,12 +649,14 @@ ExecOpenScanRelation(EState *estate, Index scanrelid, int eflags)
{
Relation rel;
Oid reloid;
LOCKMODE lockmode;
/* Open the relation and acquire lock as needed */
/* Open the relation and verify lock was obtained upstream */
reloid = getrelid(scanrelid, estate->es_range_table);
lockmode = rt_fetch(scanrelid, estate->es_range_table)->rellockmode;
rel = heap_open(reloid, lockmode);
rel = heap_open(reloid, NoLock);
Assert(IsParallelWorker() ||
CheckRelationLockedByMe(rel,
rt_fetch(scanrelid, estate->es_range_table)->rellockmode,
true));
/*
* Complain if we're attempting a scan of an unscannable relation, except