2016-03-08 21:38:50 +08:00
|
|
|
/*-------------------------------------------------------------------------
|
|
|
|
*
|
|
|
|
* pg_visibility.c
|
|
|
|
* display visibility map information and page-level visibility bits
|
|
|
|
*
|
2021-01-03 02:06:25 +08:00
|
|
|
* Copyright (c) 2016-2021, PostgreSQL Global Development Group
|
Fix bugs in contrib/pg_visibility.
collect_corrupt_items() failed to initialize tuple.t_self. While
HeapTupleSatisfiesVacuum() doesn't actually use that value, it does
Assert that it's valid, so that the code would dump core if ip_posid
chanced to be zero. (That's somewhat unlikely, which probably explains
how this got missed. In any case it wouldn't matter for field use.)
Also, collect_corrupt_items was returning the wrong TIDs, that is the
contents of t_ctid rather than the tuple's own location. This would
be the same thing in simple cases, but it could be wrong if, for
example, a past update attempt had been rolled back, leaving a live
tuple whose t_ctid doesn't point at itself.
Also, in pg_visibility(), guard against trying to read a page past
the end of the rel. The VM code handles inquiries beyond the end
of the map by silently returning zeroes, and it seems like we should
do the same thing here.
I ran into the assertion failure while using pg_visibility to check
pg_upgrade's behavior, and then noted the other problems while
reading the code.
Report: <29043.1475288648@sss.pgh.pa.us>
2016-10-02 04:32:54 +08:00
|
|
|
*
|
2016-03-08 21:38:50 +08:00
|
|
|
* contrib/pg_visibility/pg_visibility.c
|
|
|
|
*-------------------------------------------------------------------------
|
|
|
|
*/
|
|
|
|
#include "postgres.h"
|
|
|
|
|
Don't include heapam.h from others headers.
heapam.h previously was included in a number of widely used
headers (e.g. execnodes.h, indirectly in executor.h, ...). That's
problematic on its own, as heapam.h contains a lot of low-level
details that don't need to be exposed that widely, but becomes more
problematic with the upcoming introduction of pluggable table storage
- it seems inappropriate for heapam.h to be included that widely
afterwards.
heapam.h was largely only included in other headers to get the
HeapScanDesc typedef (which was defined in heapam.h, even though
HeapScanDescData is defined in relscan.h). The better solution here
seems to be to just use the underlying struct (forward declared where
necessary). Similar for BulkInsertState.
Another problem was that LockTupleMode was used in executor.h - parts
of the file tried to cope without heapam.h, but due to the fact that
it indirectly included it, several subsequent violations of that goal
were not not noticed. We could just reuse the approach of declaring
parameters as int, but it seems nicer to move LockTupleMode to
lockoptions.h - that's not a perfect location, but also doesn't seem
bad.
As a number of files relied on implicitly included heapam.h, a
significant number of files grew an explicit include. It's quite
probably that a few external projects will need to do the same.
Author: Andres Freund
Reviewed-By: Alvaro Herrera
Discussion: https://postgr.es/m/20190114000701.y4ttcb74jpskkcfb@alap3.anarazel.de
2019-01-15 07:54:18 +08:00
|
|
|
#include "access/heapam.h"
|
2016-03-08 21:38:50 +08:00
|
|
|
#include "access/htup_details.h"
|
|
|
|
#include "access/visibilitymap.h"
|
|
|
|
#include "catalog/pg_type.h"
|
2016-06-18 05:37:30 +08:00
|
|
|
#include "catalog/storage_xlog.h"
|
2016-03-08 21:38:50 +08:00
|
|
|
#include "funcapi.h"
|
|
|
|
#include "miscadmin.h"
|
|
|
|
#include "storage/bufmgr.h"
|
2016-06-16 02:33:58 +08:00
|
|
|
#include "storage/procarray.h"
|
2016-06-18 05:37:30 +08:00
|
|
|
#include "storage/smgr.h"
|
2016-03-08 21:38:50 +08:00
|
|
|
#include "utils/rel.h"
|
2019-01-22 09:03:15 +08:00
|
|
|
#include "utils/snapmgr.h"
|
2016-03-08 21:38:50 +08:00
|
|
|
|
|
|
|
PG_MODULE_MAGIC;
|
|
|
|
|
|
|
|
typedef struct vbits
|
|
|
|
{
|
|
|
|
BlockNumber next;
|
|
|
|
BlockNumber count;
|
|
|
|
uint8 bits[FLEXIBLE_ARRAY_MEMBER];
|
|
|
|
} vbits;
|
|
|
|
|
2016-06-16 02:33:58 +08:00
|
|
|
typedef struct corrupt_items
|
|
|
|
{
|
|
|
|
BlockNumber next;
|
|
|
|
BlockNumber count;
|
|
|
|
ItemPointer tids;
|
|
|
|
} corrupt_items;
|
|
|
|
|
2016-03-08 21:38:50 +08:00
|
|
|
PG_FUNCTION_INFO_V1(pg_visibility_map);
|
|
|
|
PG_FUNCTION_INFO_V1(pg_visibility_map_rel);
|
|
|
|
PG_FUNCTION_INFO_V1(pg_visibility);
|
|
|
|
PG_FUNCTION_INFO_V1(pg_visibility_rel);
|
|
|
|
PG_FUNCTION_INFO_V1(pg_visibility_map_summary);
|
2016-06-16 02:33:58 +08:00
|
|
|
PG_FUNCTION_INFO_V1(pg_check_frozen);
|
|
|
|
PG_FUNCTION_INFO_V1(pg_check_visible);
|
2016-06-18 05:37:30 +08:00
|
|
|
PG_FUNCTION_INFO_V1(pg_truncate_visibility_map);
|
2016-03-08 21:38:50 +08:00
|
|
|
|
|
|
|
static TupleDesc pg_visibility_tupdesc(bool include_blkno, bool include_pd);
|
|
|
|
static vbits *collect_visibility_data(Oid relid, bool include_pd);
|
2016-06-16 02:33:58 +08:00
|
|
|
static corrupt_items *collect_corrupt_items(Oid relid, bool all_visible,
|
|
|
|
bool all_frozen);
|
|
|
|
static void record_corrupt_item(corrupt_items *items, ItemPointer tid);
|
|
|
|
static bool tuple_all_visible(HeapTuple tup, TransactionId OldestXmin,
|
|
|
|
Buffer buffer);
|
2017-03-10 05:34:25 +08:00
|
|
|
static void check_relation_relkind(Relation rel);
|
2016-03-08 21:38:50 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Visibility map information for a single block of a relation.
|
Fix bugs in contrib/pg_visibility.
collect_corrupt_items() failed to initialize tuple.t_self. While
HeapTupleSatisfiesVacuum() doesn't actually use that value, it does
Assert that it's valid, so that the code would dump core if ip_posid
chanced to be zero. (That's somewhat unlikely, which probably explains
how this got missed. In any case it wouldn't matter for field use.)
Also, collect_corrupt_items was returning the wrong TIDs, that is the
contents of t_ctid rather than the tuple's own location. This would
be the same thing in simple cases, but it could be wrong if, for
example, a past update attempt had been rolled back, leaving a live
tuple whose t_ctid doesn't point at itself.
Also, in pg_visibility(), guard against trying to read a page past
the end of the rel. The VM code handles inquiries beyond the end
of the map by silently returning zeroes, and it seems like we should
do the same thing here.
I ran into the assertion failure while using pg_visibility to check
pg_upgrade's behavior, and then noted the other problems while
reading the code.
Report: <29043.1475288648@sss.pgh.pa.us>
2016-10-02 04:32:54 +08:00
|
|
|
*
|
|
|
|
* Note: the VM code will silently return zeroes for pages past the end
|
|
|
|
* of the map, so we allow probes up to MaxBlockNumber regardless of the
|
|
|
|
* actual relation size.
|
2016-03-08 21:38:50 +08:00
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_visibility_map(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 blkno = PG_GETARG_INT64(1);
|
|
|
|
int32 mapbits;
|
|
|
|
Relation rel;
|
|
|
|
Buffer vmbuffer = InvalidBuffer;
|
|
|
|
TupleDesc tupdesc;
|
|
|
|
Datum values[2];
|
|
|
|
bool nulls[2];
|
|
|
|
|
|
|
|
rel = relation_open(relid, AccessShareLock);
|
|
|
|
|
2017-03-10 05:34:25 +08:00
|
|
|
/* Only some relkinds have a visibility map */
|
|
|
|
check_relation_relkind(rel);
|
|
|
|
|
2016-03-08 21:38:50 +08:00
|
|
|
if (blkno < 0 || blkno > MaxBlockNumber)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
errmsg("invalid block number")));
|
|
|
|
|
|
|
|
tupdesc = pg_visibility_tupdesc(false, false);
|
|
|
|
MemSet(nulls, 0, sizeof(nulls));
|
|
|
|
|
|
|
|
mapbits = (int32) visibilitymap_get_status(rel, blkno, &vmbuffer);
|
|
|
|
if (vmbuffer != InvalidBuffer)
|
|
|
|
ReleaseBuffer(vmbuffer);
|
|
|
|
values[0] = BoolGetDatum((mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0);
|
|
|
|
values[1] = BoolGetDatum((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0);
|
|
|
|
|
|
|
|
relation_close(rel, AccessShareLock);
|
|
|
|
|
|
|
|
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Visibility map information for a single block of a relation, plus the
|
|
|
|
* page-level information for the same block.
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_visibility(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
int64 blkno = PG_GETARG_INT64(1);
|
|
|
|
int32 mapbits;
|
|
|
|
Relation rel;
|
|
|
|
Buffer vmbuffer = InvalidBuffer;
|
|
|
|
Buffer buffer;
|
|
|
|
Page page;
|
|
|
|
TupleDesc tupdesc;
|
|
|
|
Datum values[3];
|
|
|
|
bool nulls[3];
|
|
|
|
|
|
|
|
rel = relation_open(relid, AccessShareLock);
|
|
|
|
|
2017-03-10 05:34:25 +08:00
|
|
|
/* Only some relkinds have a visibility map */
|
|
|
|
check_relation_relkind(rel);
|
|
|
|
|
2016-03-08 21:38:50 +08:00
|
|
|
if (blkno < 0 || blkno > MaxBlockNumber)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
errmsg("invalid block number")));
|
|
|
|
|
|
|
|
tupdesc = pg_visibility_tupdesc(false, true);
|
|
|
|
MemSet(nulls, 0, sizeof(nulls));
|
|
|
|
|
|
|
|
mapbits = (int32) visibilitymap_get_status(rel, blkno, &vmbuffer);
|
|
|
|
if (vmbuffer != InvalidBuffer)
|
|
|
|
ReleaseBuffer(vmbuffer);
|
|
|
|
values[0] = BoolGetDatum((mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0);
|
|
|
|
values[1] = BoolGetDatum((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0);
|
|
|
|
|
Fix bugs in contrib/pg_visibility.
collect_corrupt_items() failed to initialize tuple.t_self. While
HeapTupleSatisfiesVacuum() doesn't actually use that value, it does
Assert that it's valid, so that the code would dump core if ip_posid
chanced to be zero. (That's somewhat unlikely, which probably explains
how this got missed. In any case it wouldn't matter for field use.)
Also, collect_corrupt_items was returning the wrong TIDs, that is the
contents of t_ctid rather than the tuple's own location. This would
be the same thing in simple cases, but it could be wrong if, for
example, a past update attempt had been rolled back, leaving a live
tuple whose t_ctid doesn't point at itself.
Also, in pg_visibility(), guard against trying to read a page past
the end of the rel. The VM code handles inquiries beyond the end
of the map by silently returning zeroes, and it seems like we should
do the same thing here.
I ran into the assertion failure while using pg_visibility to check
pg_upgrade's behavior, and then noted the other problems while
reading the code.
Report: <29043.1475288648@sss.pgh.pa.us>
2016-10-02 04:32:54 +08:00
|
|
|
/* Here we have to explicitly check rel size ... */
|
|
|
|
if (blkno < RelationGetNumberOfBlocks(rel))
|
|
|
|
{
|
|
|
|
buffer = ReadBuffer(rel, blkno);
|
|
|
|
LockBuffer(buffer, BUFFER_LOCK_SHARE);
|
2016-03-08 21:38:50 +08:00
|
|
|
|
Fix bugs in contrib/pg_visibility.
collect_corrupt_items() failed to initialize tuple.t_self. While
HeapTupleSatisfiesVacuum() doesn't actually use that value, it does
Assert that it's valid, so that the code would dump core if ip_posid
chanced to be zero. (That's somewhat unlikely, which probably explains
how this got missed. In any case it wouldn't matter for field use.)
Also, collect_corrupt_items was returning the wrong TIDs, that is the
contents of t_ctid rather than the tuple's own location. This would
be the same thing in simple cases, but it could be wrong if, for
example, a past update attempt had been rolled back, leaving a live
tuple whose t_ctid doesn't point at itself.
Also, in pg_visibility(), guard against trying to read a page past
the end of the rel. The VM code handles inquiries beyond the end
of the map by silently returning zeroes, and it seems like we should
do the same thing here.
I ran into the assertion failure while using pg_visibility to check
pg_upgrade's behavior, and then noted the other problems while
reading the code.
Report: <29043.1475288648@sss.pgh.pa.us>
2016-10-02 04:32:54 +08:00
|
|
|
page = BufferGetPage(buffer);
|
|
|
|
values[2] = BoolGetDatum(PageIsAllVisible(page));
|
2016-03-08 21:38:50 +08:00
|
|
|
|
Fix bugs in contrib/pg_visibility.
collect_corrupt_items() failed to initialize tuple.t_self. While
HeapTupleSatisfiesVacuum() doesn't actually use that value, it does
Assert that it's valid, so that the code would dump core if ip_posid
chanced to be zero. (That's somewhat unlikely, which probably explains
how this got missed. In any case it wouldn't matter for field use.)
Also, collect_corrupt_items was returning the wrong TIDs, that is the
contents of t_ctid rather than the tuple's own location. This would
be the same thing in simple cases, but it could be wrong if, for
example, a past update attempt had been rolled back, leaving a live
tuple whose t_ctid doesn't point at itself.
Also, in pg_visibility(), guard against trying to read a page past
the end of the rel. The VM code handles inquiries beyond the end
of the map by silently returning zeroes, and it seems like we should
do the same thing here.
I ran into the assertion failure while using pg_visibility to check
pg_upgrade's behavior, and then noted the other problems while
reading the code.
Report: <29043.1475288648@sss.pgh.pa.us>
2016-10-02 04:32:54 +08:00
|
|
|
UnlockReleaseBuffer(buffer);
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
/* As with the vismap, silently return 0 for pages past EOF */
|
|
|
|
values[2] = BoolGetDatum(false);
|
|
|
|
}
|
2016-03-08 21:38:50 +08:00
|
|
|
|
|
|
|
relation_close(rel, AccessShareLock);
|
|
|
|
|
|
|
|
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Visibility map information for every block in a relation.
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_visibility_map_rel(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
FuncCallContext *funcctx;
|
|
|
|
vbits *info;
|
|
|
|
|
|
|
|
if (SRF_IS_FIRSTCALL())
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
MemoryContext oldcontext;
|
|
|
|
|
|
|
|
funcctx = SRF_FIRSTCALL_INIT();
|
|
|
|
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
|
|
|
|
funcctx->tuple_desc = pg_visibility_tupdesc(true, false);
|
2017-03-10 05:34:25 +08:00
|
|
|
/* collect_visibility_data will verify the relkind */
|
2016-03-08 21:38:50 +08:00
|
|
|
funcctx->user_fctx = collect_visibility_data(relid, false);
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
}
|
|
|
|
|
|
|
|
funcctx = SRF_PERCALL_SETUP();
|
|
|
|
info = (vbits *) funcctx->user_fctx;
|
|
|
|
|
|
|
|
if (info->next < info->count)
|
|
|
|
{
|
|
|
|
Datum values[3];
|
|
|
|
bool nulls[3];
|
|
|
|
HeapTuple tuple;
|
|
|
|
|
|
|
|
MemSet(nulls, 0, sizeof(nulls));
|
|
|
|
values[0] = Int64GetDatum(info->next);
|
|
|
|
values[1] = BoolGetDatum((info->bits[info->next] & (1 << 0)) != 0);
|
|
|
|
values[2] = BoolGetDatum((info->bits[info->next] & (1 << 1)) != 0);
|
|
|
|
info->next++;
|
|
|
|
|
|
|
|
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
|
|
|
|
SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
|
|
|
|
}
|
|
|
|
|
|
|
|
SRF_RETURN_DONE(funcctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Visibility map information for every block in a relation, plus the page
|
|
|
|
* level information for each block.
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_visibility_rel(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
FuncCallContext *funcctx;
|
|
|
|
vbits *info;
|
|
|
|
|
|
|
|
if (SRF_IS_FIRSTCALL())
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
MemoryContext oldcontext;
|
|
|
|
|
|
|
|
funcctx = SRF_FIRSTCALL_INIT();
|
|
|
|
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
|
|
|
|
funcctx->tuple_desc = pg_visibility_tupdesc(true, true);
|
2017-03-10 05:34:25 +08:00
|
|
|
/* collect_visibility_data will verify the relkind */
|
2016-03-08 21:38:50 +08:00
|
|
|
funcctx->user_fctx = collect_visibility_data(relid, true);
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
}
|
|
|
|
|
|
|
|
funcctx = SRF_PERCALL_SETUP();
|
|
|
|
info = (vbits *) funcctx->user_fctx;
|
|
|
|
|
|
|
|
if (info->next < info->count)
|
|
|
|
{
|
|
|
|
Datum values[4];
|
|
|
|
bool nulls[4];
|
|
|
|
HeapTuple tuple;
|
|
|
|
|
|
|
|
MemSet(nulls, 0, sizeof(nulls));
|
|
|
|
values[0] = Int64GetDatum(info->next);
|
|
|
|
values[1] = BoolGetDatum((info->bits[info->next] & (1 << 0)) != 0);
|
|
|
|
values[2] = BoolGetDatum((info->bits[info->next] & (1 << 1)) != 0);
|
|
|
|
values[3] = BoolGetDatum((info->bits[info->next] & (1 << 2)) != 0);
|
|
|
|
info->next++;
|
|
|
|
|
|
|
|
tuple = heap_form_tuple(funcctx->tuple_desc, values, nulls);
|
|
|
|
SRF_RETURN_NEXT(funcctx, HeapTupleGetDatum(tuple));
|
|
|
|
}
|
|
|
|
|
|
|
|
SRF_RETURN_DONE(funcctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Count the number of all-visible and all-frozen pages in the visibility
|
|
|
|
* map for a particular relation.
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_visibility_map_summary(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
Relation rel;
|
|
|
|
BlockNumber nblocks;
|
|
|
|
BlockNumber blkno;
|
|
|
|
Buffer vmbuffer = InvalidBuffer;
|
|
|
|
int64 all_visible = 0;
|
|
|
|
int64 all_frozen = 0;
|
|
|
|
TupleDesc tupdesc;
|
|
|
|
Datum values[2];
|
|
|
|
bool nulls[2];
|
|
|
|
|
|
|
|
rel = relation_open(relid, AccessShareLock);
|
2017-03-10 05:34:25 +08:00
|
|
|
|
|
|
|
/* Only some relkinds have a visibility map */
|
|
|
|
check_relation_relkind(rel);
|
|
|
|
|
2016-03-08 21:38:50 +08:00
|
|
|
nblocks = RelationGetNumberOfBlocks(rel);
|
|
|
|
|
|
|
|
for (blkno = 0; blkno < nblocks; ++blkno)
|
|
|
|
{
|
|
|
|
int32 mapbits;
|
|
|
|
|
|
|
|
/* Make sure we are interruptible. */
|
|
|
|
CHECK_FOR_INTERRUPTS();
|
|
|
|
|
|
|
|
/* Get map info. */
|
|
|
|
mapbits = (int32) visibilitymap_get_status(rel, blkno, &vmbuffer);
|
|
|
|
if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0)
|
|
|
|
++all_visible;
|
|
|
|
if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
|
|
|
|
++all_frozen;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Clean up. */
|
|
|
|
if (vmbuffer != InvalidBuffer)
|
|
|
|
ReleaseBuffer(vmbuffer);
|
|
|
|
relation_close(rel, AccessShareLock);
|
|
|
|
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring an pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 07:36:57 +08:00
|
|
|
tupdesc = CreateTemplateTupleDesc(2);
|
2016-03-08 21:38:50 +08:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "all_visible", INT8OID, -1, 0);
|
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "all_frozen", INT8OID, -1, 0);
|
|
|
|
tupdesc = BlessTupleDesc(tupdesc);
|
|
|
|
|
|
|
|
MemSet(nulls, 0, sizeof(nulls));
|
|
|
|
values[0] = Int64GetDatum(all_visible);
|
|
|
|
values[1] = Int64GetDatum(all_frozen);
|
|
|
|
|
|
|
|
PG_RETURN_DATUM(HeapTupleGetDatum(heap_form_tuple(tupdesc, values, nulls)));
|
|
|
|
}
|
|
|
|
|
2016-06-16 02:33:58 +08:00
|
|
|
/*
|
|
|
|
* Return the TIDs of non-frozen tuples present in pages marked all-frozen
|
|
|
|
* in the visibility map. We hope no one will ever find any, but there could
|
|
|
|
* be bugs, database corruption, etc.
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_check_frozen(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
FuncCallContext *funcctx;
|
|
|
|
corrupt_items *items;
|
|
|
|
|
|
|
|
if (SRF_IS_FIRSTCALL())
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
MemoryContext oldcontext;
|
|
|
|
|
|
|
|
funcctx = SRF_FIRSTCALL_INIT();
|
|
|
|
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
|
2017-03-10 05:34:25 +08:00
|
|
|
/* collect_corrupt_items will verify the relkind */
|
2016-06-16 02:33:58 +08:00
|
|
|
funcctx->user_fctx = collect_corrupt_items(relid, false, true);
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
}
|
|
|
|
|
|
|
|
funcctx = SRF_PERCALL_SETUP();
|
|
|
|
items = (corrupt_items *) funcctx->user_fctx;
|
|
|
|
|
|
|
|
if (items->next < items->count)
|
|
|
|
SRF_RETURN_NEXT(funcctx, PointerGetDatum(&items->tids[items->next++]));
|
|
|
|
|
|
|
|
SRF_RETURN_DONE(funcctx);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return the TIDs of not-all-visible tuples in pages marked all-visible
|
|
|
|
* in the visibility map. We hope no one will ever find any, but there could
|
|
|
|
* be bugs, database corruption, etc.
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_check_visible(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
FuncCallContext *funcctx;
|
|
|
|
corrupt_items *items;
|
|
|
|
|
|
|
|
if (SRF_IS_FIRSTCALL())
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
MemoryContext oldcontext;
|
|
|
|
|
|
|
|
funcctx = SRF_FIRSTCALL_INIT();
|
|
|
|
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
|
2017-03-10 05:34:25 +08:00
|
|
|
/* collect_corrupt_items will verify the relkind */
|
2016-06-16 02:33:58 +08:00
|
|
|
funcctx->user_fctx = collect_corrupt_items(relid, true, false);
|
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
}
|
|
|
|
|
|
|
|
funcctx = SRF_PERCALL_SETUP();
|
|
|
|
items = (corrupt_items *) funcctx->user_fctx;
|
|
|
|
|
|
|
|
if (items->next < items->count)
|
|
|
|
SRF_RETURN_NEXT(funcctx, PointerGetDatum(&items->tids[items->next++]));
|
|
|
|
|
|
|
|
SRF_RETURN_DONE(funcctx);
|
|
|
|
}
|
|
|
|
|
2016-06-18 05:37:30 +08:00
|
|
|
/*
|
|
|
|
* Remove the visibility map fork for a relation. If there turn out to be
|
|
|
|
* any bugs in the visibility map code that require rebuilding the VM, this
|
|
|
|
* provides users with a way to do it that is cleaner than shutting down the
|
|
|
|
* server and removing files by hand.
|
|
|
|
*
|
|
|
|
* This is a cut-down version of RelationTruncate.
|
|
|
|
*/
|
|
|
|
Datum
|
|
|
|
pg_truncate_visibility_map(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
Oid relid = PG_GETARG_OID(0);
|
|
|
|
Relation rel;
|
Speedup truncations of relation forks.
When a relation is truncated, shared_buffers needs to be scanned
so that any buffers for the relation forks are invalidated in it.
Previously, shared_buffers was scanned for each relation forks, i.e.,
MAIN, FSM and VM, when VACUUM truncated off any empty pages
at the end of relation or TRUNCATE truncated the relation in place.
Since shared_buffers needed to be scanned multiple times,
it could take a long time to finish those commands especially
when shared_buffers was large.
This commit changes the logic so that shared_buffers is scanned only
one time for those three relation forks.
Author: Kirk Jamison
Reviewed-by: Masahiko Sawada, Thomas Munro, Alvaro Herrera, Takayuki Tsunakawa and Fujii Masao
Discussion: https://postgr.es/m/D09B13F772D2274BB348A310EE3027C64E2067@g01jpexmbkw24
2019-09-24 16:31:26 +08:00
|
|
|
ForkNumber fork;
|
|
|
|
BlockNumber block;
|
2016-06-18 05:37:30 +08:00
|
|
|
|
|
|
|
rel = relation_open(relid, AccessExclusiveLock);
|
|
|
|
|
2017-03-10 05:34:25 +08:00
|
|
|
/* Only some relkinds have a visibility map */
|
|
|
|
check_relation_relkind(rel);
|
2016-06-18 05:37:30 +08:00
|
|
|
|
2021-07-13 05:01:29 +08:00
|
|
|
/* Forcibly reset cached file size */
|
|
|
|
RelationGetSmgr(rel)->smgr_cached_nblocks[VISIBILITYMAP_FORKNUM] = InvalidBlockNumber;
|
2016-06-18 05:37:30 +08:00
|
|
|
|
Speedup truncations of relation forks.
When a relation is truncated, shared_buffers needs to be scanned
so that any buffers for the relation forks are invalidated in it.
Previously, shared_buffers was scanned for each relation forks, i.e.,
MAIN, FSM and VM, when VACUUM truncated off any empty pages
at the end of relation or TRUNCATE truncated the relation in place.
Since shared_buffers needed to be scanned multiple times,
it could take a long time to finish those commands especially
when shared_buffers was large.
This commit changes the logic so that shared_buffers is scanned only
one time for those three relation forks.
Author: Kirk Jamison
Reviewed-by: Masahiko Sawada, Thomas Munro, Alvaro Herrera, Takayuki Tsunakawa and Fujii Masao
Discussion: https://postgr.es/m/D09B13F772D2274BB348A310EE3027C64E2067@g01jpexmbkw24
2019-09-24 16:31:26 +08:00
|
|
|
block = visibilitymap_prepare_truncate(rel, 0);
|
|
|
|
if (BlockNumberIsValid(block))
|
|
|
|
{
|
|
|
|
fork = VISIBILITYMAP_FORKNUM;
|
2021-07-13 05:01:29 +08:00
|
|
|
smgrtruncate(RelationGetSmgr(rel), &fork, 1, &block);
|
Speedup truncations of relation forks.
When a relation is truncated, shared_buffers needs to be scanned
so that any buffers for the relation forks are invalidated in it.
Previously, shared_buffers was scanned for each relation forks, i.e.,
MAIN, FSM and VM, when VACUUM truncated off any empty pages
at the end of relation or TRUNCATE truncated the relation in place.
Since shared_buffers needed to be scanned multiple times,
it could take a long time to finish those commands especially
when shared_buffers was large.
This commit changes the logic so that shared_buffers is scanned only
one time for those three relation forks.
Author: Kirk Jamison
Reviewed-by: Masahiko Sawada, Thomas Munro, Alvaro Herrera, Takayuki Tsunakawa and Fujii Masao
Discussion: https://postgr.es/m/D09B13F772D2274BB348A310EE3027C64E2067@g01jpexmbkw24
2019-09-24 16:31:26 +08:00
|
|
|
}
|
2016-06-18 05:37:30 +08:00
|
|
|
|
|
|
|
if (RelationNeedsWAL(rel))
|
|
|
|
{
|
|
|
|
xl_smgr_truncate xlrec;
|
|
|
|
|
|
|
|
xlrec.blkno = 0;
|
|
|
|
xlrec.rnode = rel->rd_node;
|
|
|
|
xlrec.flags = SMGR_TRUNCATE_VM;
|
|
|
|
|
|
|
|
XLogBeginInsert();
|
|
|
|
XLogRegisterData((char *) &xlrec, sizeof(xlrec));
|
|
|
|
|
|
|
|
XLogInsert(RM_SMGR_ID, XLOG_SMGR_TRUNCATE | XLR_SPECIAL_REL_UPDATE);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Release the lock right away, not at commit time.
|
|
|
|
*
|
|
|
|
* It would be a problem to release the lock prior to commit if this
|
|
|
|
* truncate operation sends any transactional invalidation messages. Other
|
|
|
|
* backends would potentially be able to lock the relation without
|
|
|
|
* processing them in the window of time between when we release the lock
|
|
|
|
* here and when we sent the messages at our eventual commit. However,
|
|
|
|
* we're currently only sending a non-transactional smgr invalidation,
|
|
|
|
* which will have been posted to shared memory immediately from within
|
Speedup truncations of relation forks.
When a relation is truncated, shared_buffers needs to be scanned
so that any buffers for the relation forks are invalidated in it.
Previously, shared_buffers was scanned for each relation forks, i.e.,
MAIN, FSM and VM, when VACUUM truncated off any empty pages
at the end of relation or TRUNCATE truncated the relation in place.
Since shared_buffers needed to be scanned multiple times,
it could take a long time to finish those commands especially
when shared_buffers was large.
This commit changes the logic so that shared_buffers is scanned only
one time for those three relation forks.
Author: Kirk Jamison
Reviewed-by: Masahiko Sawada, Thomas Munro, Alvaro Herrera, Takayuki Tsunakawa and Fujii Masao
Discussion: https://postgr.es/m/D09B13F772D2274BB348A310EE3027C64E2067@g01jpexmbkw24
2019-09-24 16:31:26 +08:00
|
|
|
* smgr_truncate. Therefore, there should be no race here.
|
2016-06-18 05:37:30 +08:00
|
|
|
*
|
|
|
|
* The reason why it's desirable to release the lock early here is because
|
|
|
|
* of the possibility that someone will need to use this to blow away many
|
|
|
|
* visibility map forks at once. If we can't release the lock until
|
|
|
|
* commit time, the transaction doing this will accumulate
|
|
|
|
* AccessExclusiveLocks on all of those relations at the same time, which
|
|
|
|
* is undesirable. However, if this turns out to be unsafe we may have no
|
|
|
|
* choice...
|
|
|
|
*/
|
|
|
|
relation_close(rel, AccessExclusiveLock);
|
|
|
|
|
|
|
|
/* Nothing to return. */
|
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
|
|
|
|
2016-03-08 21:38:50 +08:00
|
|
|
/*
 * Helper function to construct whichever TupleDesc we need for a particular
 * call.
 *
 * The result always contains the "all_visible" and "all_frozen" boolean
 * columns; "blkno" (int8) is prepended when include_blkno is true, and
 * "pd_all_visible" (bool) is appended when include_pd is true.  The
 * descriptor is blessed so it can be used to return composite values.
 */
static TupleDesc
pg_visibility_tupdesc(bool include_blkno, bool include_pd)
{
	TupleDesc	tupdesc;
	AttrNumber	maxattr = 2;	/* all_visible + all_frozen, always present */
	AttrNumber	a = 0;

	if (include_blkno)
		++maxattr;
	if (include_pd)
		++maxattr;
	tupdesc = CreateTemplateTupleDesc(maxattr);
	if (include_blkno)
		TupleDescInitEntry(tupdesc, ++a, "blkno", INT8OID, -1, 0);
	TupleDescInitEntry(tupdesc, ++a, "all_visible", BOOLOID, -1, 0);
	TupleDescInitEntry(tupdesc, ++a, "all_frozen", BOOLOID, -1, 0);
	if (include_pd)
		TupleDescInitEntry(tupdesc, ++a, "pd_all_visible", BOOLOID, -1, 0);
	/* We must have initialized exactly as many columns as we allocated. */
	Assert(a == maxattr);

	return BlessTupleDesc(tupdesc);
}
|
|
|
|
|
|
|
|
/*
 * Collect visibility data about a relation.
 *
 * Checks relkind of relid and will throw an error if the relation does not
 * have a VM.
 *
 * For each block, bit 0 of info->bits[blkno] records the VM all-visible
 * flag, bit 1 the VM all-frozen flag, and (only when include_pd is true)
 * bit 2 the page-level PD_ALL_VISIBLE flag.
 */
static vbits *
collect_visibility_data(Oid relid, bool include_pd)
{
	Relation	rel;
	BlockNumber nblocks;
	vbits	   *info;
	BlockNumber blkno;
	Buffer		vmbuffer = InvalidBuffer;
	BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);

	rel = relation_open(relid, AccessShareLock);

	/* Only some relkinds have a visibility map */
	check_relation_relkind(rel);

	nblocks = RelationGetNumberOfBlocks(rel);
	info = palloc0(offsetof(vbits, bits) + nblocks);
	info->next = 0;
	info->count = nblocks;

	for (blkno = 0; blkno < nblocks; ++blkno)
	{
		int32		mapbits;

		/* Make sure we are interruptible. */
		CHECK_FOR_INTERRUPTS();

		/* Get map info. */
		mapbits = (int32) visibilitymap_get_status(rel, blkno, &vmbuffer);
		if ((mapbits & VISIBILITYMAP_ALL_VISIBLE) != 0)
			info->bits[blkno] |= (1 << 0);
		if ((mapbits & VISIBILITYMAP_ALL_FROZEN) != 0)
			info->bits[blkno] |= (1 << 1);

		/*
		 * Page-level data requires reading every block, so only get it if the
		 * caller needs it.  Use a buffer access strategy, too, to prevent
		 * cache-trashing.
		 */
		if (include_pd)
		{
			Buffer		buffer;
			Page		page;

			buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
										bstrategy);
			LockBuffer(buffer, BUFFER_LOCK_SHARE);

			page = BufferGetPage(buffer);
			if (PageIsAllVisible(page))
				info->bits[blkno] |= (1 << 2);

			UnlockReleaseBuffer(buffer);
		}
	}

	/* Clean up. */
	if (vmbuffer != InvalidBuffer)
		ReleaseBuffer(vmbuffer);
	relation_close(rel, AccessShareLock);

	return info;
}
|
2016-06-16 02:33:58 +08:00
|
|
|
|
|
|
|
/*
 * Returns a list of items whose visibility map information does not match
 * the status of the tuples on the page.
 *
 * If all_visible is passed as true, this will include all items which are
 * on pages marked as all-visible in the visibility map but which do not
 * seem to in fact be all-visible.
 *
 * If all_frozen is passed as true, this will include all items which are
 * on pages marked as all-frozen but which do not seem to in fact be frozen.
 *
 * Checks relkind of relid and will throw an error if the relation does not
 * have a VM.
 */
static corrupt_items *
collect_corrupt_items(Oid relid, bool all_visible, bool all_frozen)
{
	Relation	rel;
	BlockNumber nblocks;
	corrupt_items *items;
	BlockNumber blkno;
	Buffer		vmbuffer = InvalidBuffer;
	BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
	TransactionId OldestXmin = InvalidTransactionId;

	rel = relation_open(relid, AccessShareLock);

	/* Only some relkinds have a visibility map */
	check_relation_relkind(rel);

	/* OldestXmin is only needed for the all-visible checks below. */
	if (all_visible)
		OldestXmin = GetOldestNonRemovableTransactionId(rel);

	nblocks = RelationGetNumberOfBlocks(rel);

	/*
	 * Guess an initial array size. We don't expect many corrupted tuples, so
	 * start with a small array. This function uses the "next" field to track
	 * the next offset where we can store an item (which is the same thing as
	 * the number of items found so far) and the "count" field to track the
	 * number of entries allocated. We'll repurpose these fields before
	 * returning.
	 */
	items = palloc0(sizeof(corrupt_items));
	items->next = 0;
	items->count = 64;
	items->tids = palloc(items->count * sizeof(ItemPointerData));

	/* Loop over every block in the relation. */
	for (blkno = 0; blkno < nblocks; ++blkno)
	{
		bool		check_frozen = false;
		bool		check_visible = false;
		Buffer		buffer;
		Page		page;
		OffsetNumber offnum,
					maxoff;

		/* Make sure we are interruptible. */
		CHECK_FOR_INTERRUPTS();

		/* Use the visibility map to decide whether to check this page. */
		if (all_frozen && VM_ALL_FROZEN(rel, blkno, &vmbuffer))
			check_frozen = true;
		if (all_visible && VM_ALL_VISIBLE(rel, blkno, &vmbuffer))
			check_visible = true;
		if (!check_visible && !check_frozen)
			continue;

		/* Read and lock the page. */
		buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL,
									bstrategy);
		LockBuffer(buffer, BUFFER_LOCK_SHARE);

		page = BufferGetPage(buffer);
		maxoff = PageGetMaxOffsetNumber(page);

		/*
		 * The visibility map bits might have changed while we were acquiring
		 * the page lock.  Recheck to avoid returning spurious results.
		 */
		if (check_frozen && !VM_ALL_FROZEN(rel, blkno, &vmbuffer))
			check_frozen = false;
		if (check_visible && !VM_ALL_VISIBLE(rel, blkno, &vmbuffer))
			check_visible = false;
		if (!check_visible && !check_frozen)
		{
			UnlockReleaseBuffer(buffer);
			continue;
		}

		/* Iterate over each tuple on the page. */
		for (offnum = FirstOffsetNumber;
			 offnum <= maxoff;
			 offnum = OffsetNumberNext(offnum))
		{
			HeapTupleData tuple;
			ItemId		itemid;

			itemid = PageGetItemId(page, offnum);

			/* Unused or redirect line pointers are of no interest. */
			if (!ItemIdIsUsed(itemid) || ItemIdIsRedirected(itemid))
				continue;

			/* Dead line pointers are neither all-visible nor frozen. */
			if (ItemIdIsDead(itemid))
			{
				ItemPointerSet(&(tuple.t_self), blkno, offnum);
				record_corrupt_item(items, &tuple.t_self);
				continue;
			}

			/* Initialize a HeapTupleData structure for checks below. */
			ItemPointerSet(&(tuple.t_self), blkno, offnum);
			tuple.t_data = (HeapTupleHeader) PageGetItem(page, itemid);
			tuple.t_len = ItemIdGetLength(itemid);
			tuple.t_tableOid = relid;

			/*
			 * If we're checking whether the page is all-visible, we expect
			 * the tuple to be all-visible.
			 */
			if (check_visible &&
				!tuple_all_visible(&tuple, OldestXmin, buffer))
			{
				TransactionId RecomputedOldestXmin;

				/*
				 * Time has passed since we computed OldestXmin, so it's
				 * possible that this tuple is all-visible in reality even
				 * though it doesn't appear so based on our
				 * previously-computed value.  Let's compute a new value so we
				 * can be certain whether there is a problem.
				 *
				 * From a concurrency point of view, it sort of sucks to
				 * retake ProcArrayLock here while we're holding the buffer
				 * exclusively locked, but it should be safe against
				 * deadlocks, because surely
				 * GetOldestNonRemovableTransactionId() should never take a
				 * buffer lock. And this shouldn't happen often, so it's worth
				 * being careful so as to avoid false positives.
				 */
				RecomputedOldestXmin = GetOldestNonRemovableTransactionId(rel);

				if (!TransactionIdPrecedes(OldestXmin, RecomputedOldestXmin))
					record_corrupt_item(items, &tuple.t_self);
				else
				{
					OldestXmin = RecomputedOldestXmin;
					if (!tuple_all_visible(&tuple, OldestXmin, buffer))
						record_corrupt_item(items, &tuple.t_self);
				}
			}

			/*
			 * If we're checking whether the page is all-frozen, we expect the
			 * tuple to be in a state where it will never need freezing.
			 */
			if (check_frozen)
			{
				if (heap_tuple_needs_eventual_freeze(tuple.t_data))
					record_corrupt_item(items, &tuple.t_self);
			}
		}

		UnlockReleaseBuffer(buffer);
	}

	/* Clean up. */
	if (vmbuffer != InvalidBuffer)
		ReleaseBuffer(vmbuffer);
	relation_close(rel, AccessShareLock);

	/*
	 * Before returning, repurpose the fields to match caller's expectations.
	 * next is now the next item that should be read (rather than written) and
	 * count is now the number of items we wrote (rather than the number we
	 * allocated).
	 */
	items->count = items->next;
	items->next = 0;

	return items;
}
|
|
|
|
|
|
|
|
/*
 * Remember one corrupt item.
 *
 * Appends *tid to items->tids, doubling the allocation when full.  During
 * collection, items->next is the write offset and items->count the allocated
 * capacity (see collect_corrupt_items).
 */
static void
record_corrupt_item(corrupt_items *items, ItemPointer tid)
{
	/* enlarge output array if needed. */
	if (items->next >= items->count)
	{
		items->count *= 2;
		items->tids = repalloc(items->tids,
							   items->count * sizeof(ItemPointerData));
	}
	/* and add the new item */
	items->tids[items->next++] = *tid;
}
|
|
|
|
|
|
|
|
/*
 * Check whether a tuple is all-visible relative to a given OldestXmin value.
 * The buffer should contain the tuple and should be locked and pinned.
 *
 * Returns true only if the tuple is LIVE according to
 * HeapTupleSatisfiesVacuum AND its xmin precedes OldestXmin, i.e. every
 * current and future snapshot can see it.
 */
static bool
tuple_all_visible(HeapTuple tup, TransactionId OldestXmin, Buffer buffer)
{
	HTSV_Result state;
	TransactionId xmin;

	state = HeapTupleSatisfiesVacuum(tup, OldestXmin, buffer);
	if (state != HEAPTUPLE_LIVE)
		return false;			/* all-visible implies live */

	/*
	 * Neither lazy_scan_heap nor heap_page_is_all_visible will mark a page
	 * all-visible unless every tuple is hinted committed. However, those hint
	 * bits could be lost after a crash, so we can't be certain that they'll
	 * be set here.  So just check the xmin.
	 */

	xmin = HeapTupleHeaderGetXmin(tup->t_data);
	if (!TransactionIdPrecedes(xmin, OldestXmin))
		return false;			/* xmin not old enough for all to see */

	return true;
}
|
2017-03-10 05:34:25 +08:00
|
|
|
|
|
|
|
/*
 * check_relation_relkind - convenience routine to check that relation
 * is of the relkind supported by the callers
 *
 * Only ordinary tables, materialized views, and TOAST tables have a
 * visibility map; any other relkind raises ERRCODE_WRONG_OBJECT_TYPE.
 */
static void
check_relation_relkind(Relation rel)
{
	if (rel->rd_rel->relkind != RELKIND_RELATION &&
		rel->rd_rel->relkind != RELKIND_MATVIEW &&
		rel->rd_rel->relkind != RELKIND_TOASTVALUE)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("relation \"%s\" is of wrong relation kind",
						RelationGetRelationName(rel)),
				 errdetail_relkind_not_supported(rel->rd_rel->relkind)));
}
|