Mirror of https://git.postgresql.org/git/postgresql.git (synced 2024-12-03 08:00:21 +08:00)
Use errmsg_internal for debug messages
An inconsistent set of debug-level messages was not using errmsg_internal(), thus uselessly exposing the messages to translation work. Fix those.
parent: e6b8e83b9f
commit: 0e392fcc0d
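Every hunk below applies the same one-line substitution. As a minimal illustration (the lines are taken from the CreateCheckPoint() hunk further down; the comments are editorial, not part of the commit), the pattern is:

    /* Before: errmsg() marks the format string for translation. */
    ereport(DEBUG1,
            (errmsg("checkpoint skipped because system is idle")));

    /* After: errmsg_internal() formats identically but keeps this
     * debug-only string out of the translation workload. */
    ereport(DEBUG1,
            (errmsg_internal("checkpoint skipped because system is idle")));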
@@ -535,7 +535,7 @@ bt_check_every_level(Relation rel, Relation heaprel, bool heapkeyspace,
 if (metad->btm_fastroot != metad->btm_root)
 ereport(DEBUG1,
 (errcode(ERRCODE_NO_DATA),
-errmsg("harmless fast root mismatch in index %s",
+errmsg_internal("harmless fast root mismatch in index %s",
 RelationGetRelationName(rel)),
 errdetail_internal("Fast root block %u (level %u) differs from true root block %u (level %u).",
 metad->btm_fastroot, metad->btm_fastlevel,
@@ -721,7 +721,7 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
 else
 ereport(DEBUG1,
 (errcode(ERRCODE_NO_DATA),
-errmsg("block %u of index \"%s\" ignored",
+errmsg_internal("block %u of index \"%s\" ignored",
 current, RelationGetRelationName(state->rel))));
 goto nextpage;
 }
@@ -979,7 +979,7 @@ bt_recheck_sibling_links(BtreeCheckState *state,
 /* Report split in left sibling, not target (or new target) */
 ereport(DEBUG1,
 (errcode(ERRCODE_INTERNAL_ERROR),
-errmsg("harmless concurrent page split detected in index \"%s\"",
+errmsg_internal("harmless concurrent page split detected in index \"%s\"",
 RelationGetRelationName(state->rel)),
 errdetail_internal("Block=%u new right sibling=%u original right sibling=%u.",
 leftcurrent, newtargetblock,
@@ -1605,7 +1605,7 @@ bt_right_page_check_scankey(BtreeCheckState *state)
 targetnext = opaque->btpo_next;
 ereport(DEBUG1,
 (errcode(ERRCODE_NO_DATA),
-errmsg("level %u leftmost page of index \"%s\" was found deleted or half dead",
+errmsg_internal("level %u leftmost page of index \"%s\" was found deleted or half dead",
 opaque->btpo.level, RelationGetRelationName(state->rel)),
 errdetail_internal("Deleted page found when building scankey from right sibling.")));
 
@@ -1733,7 +1733,7 @@ bt_right_page_check_scankey(BtreeCheckState *state)
 */
 ereport(DEBUG1,
 (errcode(ERRCODE_NO_DATA),
-errmsg("%s block %u of index \"%s\" has no first data item",
+errmsg_internal("%s block %u of index \"%s\" has no first data item",
 P_ISLEAF(opaque) ? "leaf" : "internal", targetnext,
 RelationGetRelationName(state->rel))));
 return NULL;
@@ -2287,7 +2287,7 @@ bt_downlink_missing_check(BtreeCheckState *state, bool rightsplit,
 {
 ereport(DEBUG1,
 (errcode(ERRCODE_NO_DATA),
-errmsg("harmless interrupted page split detected in index %s",
+errmsg_internal("harmless interrupted page split detected in index %s",
 RelationGetRelationName(state->rel)),
 errdetail_internal("Block=%u level=%u left sibling=%u page lsn=%X/%X.",
 blkno, opaque->btpo.level,

@@ -697,7 +697,7 @@ apw_dump_now(bool is_bgworker, bool dump_unlogged)
 apw_state->pid_using_dumpfile = InvalidPid;
 
 ereport(DEBUG1,
-(errmsg("wrote block details for %d blocks", num_blocks)));
+(errmsg_internal("wrote block details for %d blocks", num_blocks)));
 return num_blocks;
 }
 

@@ -2277,7 +2277,7 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid,
 
 /* Log the info */
 ereport(DEBUG1,
-(errmsg("MultiXactId wrap limit is %u, limited by database with OID %u",
+(errmsg_internal("MultiXactId wrap limit is %u, limited by database with OID %u",
 multiWrapLimit, oldest_datoid)));
 
 /*
@@ -2611,7 +2611,7 @@ SetOffsetVacuumLimit(bool is_startup)
 
 if (oldestOffsetKnown)
 ereport(DEBUG1,
-(errmsg("oldest MultiXactId member is at offset %u",
+(errmsg_internal("oldest MultiXactId member is at offset %u",
 oldestOffset)));
 else
 ereport(LOG,
@@ -2640,7 +2640,7 @@ SetOffsetVacuumLimit(bool is_startup)
 (errmsg("MultiXact member wraparound protections are now enabled")));
 
 ereport(DEBUG1,
-(errmsg("MultiXact member stop limit is now %u based on MultiXact %u",
+(errmsg_internal("MultiXact member stop limit is now %u based on MultiXact %u",
 offsetStopLimit, oldestMultiXactId)));
 }
 else if (prevOldestOffsetKnown)

@@ -1315,7 +1315,7 @@ SlruInternalDeleteSegment(SlruCtl ctl, int segno)
 
 /* Unlink the file. */
 SlruFileName(ctl, path, segno);
-ereport(DEBUG2, (errmsg("removing file \"%s\"", path)));
+ereport(DEBUG2, (errmsg_internal("removing file \"%s\"", path)));
 unlink(path);
 }
 

@@ -425,7 +425,7 @@ SetTransactionIdLimit(TransactionId oldest_datfrozenxid, Oid oldest_datoid)
 
 /* Log the info */
 ereport(DEBUG1,
-(errmsg("transaction ID wrap limit is %u, limited by database with OID %u",
+(errmsg_internal("transaction ID wrap limit is %u, limited by database with OID %u",
 xidWrapLimit, oldest_datoid)));
 
 /*

@@ -2837,7 +2837,7 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
 minRecoveryPointTLI = newMinRecoveryPointTLI;
 
 ereport(DEBUG2,
-(errmsg("updated min recovery point to %X/%X on timeline %u",
+(errmsg_internal("updated min recovery point to %X/%X on timeline %u",
 (uint32) (minRecoveryPoint >> 32),
 (uint32) minRecoveryPoint,
 newMinRecoveryPointTLI)));
@@ -4209,7 +4209,7 @@ RemoveXlogFile(const char *segname, XLogSegNo recycleSegNo,
 true, recycleSegNo, true))
 {
 ereport(DEBUG2,
-(errmsg("recycled write-ahead log file \"%s\"",
+(errmsg_internal("recycled write-ahead log file \"%s\"",
 segname)));
 CheckpointStats.ckpt_segs_recycled++;
 /* Needn't recheck that slot on future iterations */
@@ -4221,7 +4221,7 @@ RemoveXlogFile(const char *segname, XLogSegNo recycleSegNo,
 int rc;
 
 ereport(DEBUG2,
-(errmsg("removing write-ahead log file \"%s\"",
+(errmsg_internal("removing write-ahead log file \"%s\"",
 segname)));
 
 #ifdef WIN32
@@ -6597,7 +6597,7 @@ StartupXLOG(void)
 memcpy(&checkPoint, XLogRecGetData(xlogreader), sizeof(CheckPoint));
 wasShutdown = ((record->xl_info & ~XLR_INFO_MASK) == XLOG_CHECKPOINT_SHUTDOWN);
 ereport(DEBUG1,
-(errmsg("checkpoint record is at %X/%X",
+(errmsg_internal("checkpoint record is at %X/%X",
 (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
 InRecovery = true; /* force recovery even if SHUTDOWNED */
 
@@ -6730,7 +6730,7 @@ StartupXLOG(void)
 if (record != NULL)
 {
 ereport(DEBUG1,
-(errmsg("checkpoint record is at %X/%X",
+(errmsg_internal("checkpoint record is at %X/%X",
 (uint32) (checkPointLoc >> 32), (uint32) checkPointLoc)));
 }
 else
@@ -7118,7 +7118,7 @@ StartupXLOG(void)
 int nxids;
 
 ereport(DEBUG1,
-(errmsg("initializing for hot standby")));
+(errmsg_internal("initializing for hot standby")));
 
 InitRecoveryTransactionEnvironment();
 
@@ -8933,7 +8933,7 @@ CreateCheckPoint(int flags)
 WALInsertLockRelease();
 END_CRIT_SECTION();
 ereport(DEBUG1,
-(errmsg("checkpoint skipped because system is idle")));
+(errmsg_internal("checkpoint skipped because system is idle")));
 return;
 }
 }
@@ -9399,7 +9399,7 @@ CreateRestartPoint(int flags)
 if (!RecoveryInProgress())
 {
 ereport(DEBUG2,
-(errmsg("skipping restartpoint, recovery has already ended")));
+(errmsg_internal("skipping restartpoint, recovery has already ended")));
 return false;
 }
 
@@ -9421,7 +9421,7 @@ CreateRestartPoint(int flags)
 lastCheckPoint.redo <= ControlFile->checkPointCopy.redo)
 {
 ereport(DEBUG2,
-(errmsg("skipping restartpoint, already performed at %X/%X",
+(errmsg_internal("skipping restartpoint, already performed at %X/%X",
 (uint32) (lastCheckPoint.redo >> 32),
 (uint32) lastCheckPoint.redo)));
 
@@ -11763,12 +11763,12 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
 */
 if (fscanf(lfp, "START TIME: %127[^\n]\n", backuptime) == 1)
 ereport(DEBUG1,
-(errmsg("backup time %s in file \"%s\"",
+(errmsg_internal("backup time %s in file \"%s\"",
 backuptime, BACKUP_LABEL_FILE)));
 
 if (fscanf(lfp, "LABEL: %1023[^\n]\n", backuplabel) == 1)
 ereport(DEBUG1,
-(errmsg("backup label %s in file \"%s\"",
+(errmsg_internal("backup label %s in file \"%s\"",
 backuplabel, BACKUP_LABEL_FILE)));
 
 /*
@@ -11785,7 +11785,7 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
 tli_from_file, tli_from_walseg)));
 
 ereport(DEBUG1,
-(errmsg("backup timeline %u in file \"%s\"",
+(errmsg_internal("backup timeline %u in file \"%s\"",
 tli_from_file, BACKUP_LABEL_FILE)));
 }
 

@@ -1198,7 +1198,7 @@ reportDependentObjects(const ObjectAddresses *targetObjects,
 * log_min_messages are different.
 */
 ereport(DEBUG2,
-(errmsg("drop auto-cascades to %s",
+(errmsg_internal("drop auto-cascades to %s",
 objDesc)));
 }
 else if (behavior == DROP_RESTRICT)

@@ -3081,14 +3081,12 @@ index_build(Relation heapRelation,
 
 if (indexInfo->ii_ParallelWorkers == 0)
 ereport(DEBUG1,
-(errmsg("building index \"%s\" on table \"%s\" serially",
+(errmsg_internal("building index \"%s\" on table \"%s\" serially",
 RelationGetRelationName(indexRelation),
 RelationGetRelationName(heapRelation))));
 else
 ereport(DEBUG1,
-(errmsg_plural("building index \"%s\" on table \"%s\" with request for %d parallel worker",
-"building index \"%s\" on table \"%s\" with request for %d parallel workers",
-indexInfo->ii_ParallelWorkers,
+(errmsg_internal("building index \"%s\" on table \"%s\" with request for %d parallel workers",
 RelationGetRelationName(indexRelation),
 RelationGetRelationName(heapRelation),
 indexInfo->ii_ParallelWorkers)));

@@ -1085,7 +1085,7 @@ DefineIndex(Oid relationId,
 }
 
 ereport(DEBUG1,
-(errmsg("%s %s will create implicit index \"%s\" for table \"%s\"",
+(errmsg_internal("%s %s will create implicit index \"%s\" for table \"%s\"",
 is_alter_table ? "ALTER TABLE / ADD" : "CREATE TABLE /",
 constraint_type,
 indexRelationName, RelationGetRelationName(rel))));

@@ -647,7 +647,7 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
 copy_data ? SUBREL_STATE_INIT : SUBREL_STATE_READY,
 InvalidXLogRecPtr);
 ereport(DEBUG1,
-(errmsg("table \"%s.%s\" added to subscription \"%s\"",
+(errmsg_internal("table \"%s.%s\" added to subscription \"%s\"",
 rv->schemaname, rv->relname, sub->name)));
 }
 }
@@ -721,7 +721,7 @@ AlterSubscription_refresh(Subscription *sub, bool copy_data)
 }
 
 ereport(DEBUG1,
-(errmsg("table \"%s.%s\" removed from subscription \"%s\"",
+(errmsg_internal("table \"%s.%s\" removed from subscription \"%s\"",
 get_namespace_name(get_rel_namespace(relid)),
 get_rel_name(relid),
 sub->name)));

@@ -5399,11 +5399,11 @@ ATRewriteTable(AlteredTableInfo *tab, Oid OIDNewHeap, LOCKMODE lockmode)
 
 if (newrel)
 ereport(DEBUG1,
-(errmsg("rewriting table \"%s\"",
+(errmsg_internal("rewriting table \"%s\"",
 RelationGetRelationName(oldrel))));
 else
 ereport(DEBUG1,
-(errmsg("verifying table \"%s\"",
+(errmsg_internal("verifying table \"%s\"",
 RelationGetRelationName(oldrel))));
 
 if (newrel)
@@ -7016,7 +7016,7 @@ NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr)
 if (ConstraintImpliedByRelConstraint(rel, list_make1(nnulltest), NIL))
 {
 ereport(DEBUG1,
-(errmsg("existing constraints on column \"%s.%s\" are sufficient to prove that it does not contain nulls",
+(errmsg_internal("existing constraints on column \"%s.%s\" are sufficient to prove that it does not contain nulls",
 RelationGetRelationName(rel), NameStr(attr->attname))));
 return true;
 }
@@ -10565,7 +10565,7 @@ validateForeignKeyConstraint(char *conname,
 MemoryContext perTupCxt;
 
 ereport(DEBUG1,
-(errmsg("validating foreign key constraint \"%s\"", conname)));
+(errmsg_internal("validating foreign key constraint \"%s\"", conname)));
 
 /*
 * Build a trigger call structure; we'll need it either way.
@@ -16294,11 +16294,11 @@ QueuePartitionConstraintValidation(List **wqueue, Relation scanrel,
 {
 if (!validate_default)
 ereport(DEBUG1,
-(errmsg("partition constraint for table \"%s\" is implied by existing constraints",
+(errmsg_internal("partition constraint for table \"%s\" is implied by existing constraints",
 RelationGetRelationName(scanrel))));
 else
 ereport(DEBUG1,
-(errmsg("updated partition constraint for default partition \"%s\" is implied by existing constraints",
+(errmsg_internal("updated partition constraint for default partition \"%s\" is implied by existing constraints",
 RelationGetRelationName(scanrel))));
 return;
 }

@@ -768,7 +768,7 @@ llvm_compile_module(LLVMJitContext *context)
 MemoryContextSwitchTo(oldcontext);
 
 ereport(DEBUG1,
-(errmsg("time to inline: %.3fs, opt: %.3fs, emit: %.3fs",
+(errmsg_internal("time to inline: %.3fs, opt: %.3fs, emit: %.3fs",
 INSTR_TIME_GET_DOUBLE(context->base.instr.inlining_counter),
 INSTR_TIME_GET_DOUBLE(context->base.instr.optimization_counter),
 INSTR_TIME_GET_DOUBLE(context->base.instr.emission_counter)),

@@ -119,7 +119,7 @@ secure_open_server(Port *port)
 r = be_tls_open_server(port);
 
 ereport(DEBUG2,
-(errmsg("SSL connection from \"%s\"",
+(errmsg_internal("SSL connection from \"%s\"",
 port->peer_cn ? port->peer_cn : "(anonymous)")));
 #endif
 

@@ -443,7 +443,7 @@ generateSerialExtraStmts(CreateStmtContext *cxt, ColumnDef *column,
 }
 
 ereport(DEBUG1,
-(errmsg("%s will create implicit sequence \"%s\" for serial column \"%s.%s\"",
+(errmsg_internal("%s will create implicit sequence \"%s\" for serial column \"%s.%s\"",
 cxt->stmtType, sname,
 cxt->relation->relname, column->colname)));
 

@@ -3144,7 +3144,7 @@ check_default_partition_contents(Relation parent, Relation default_rel,
 if (PartConstraintImpliedByRelConstraint(default_rel, def_part_constraints))
 {
 ereport(DEBUG1,
-(errmsg("updated partition constraint for default partition \"%s\" is implied by existing constraints",
+(errmsg_internal("updated partition constraint for default partition \"%s\" is implied by existing constraints",
 RelationGetRelationName(default_rel))));
 return;
 }
@@ -3195,7 +3195,7 @@ check_default_partition_contents(Relation parent, Relation default_rel,
 def_part_constraints))
 {
 ereport(DEBUG1,
-(errmsg("updated partition constraint for default partition \"%s\" is implied by existing constraints",
+(errmsg_internal("updated partition constraint for default partition \"%s\" is implied by existing constraints",
 RelationGetRelationName(part_rel))));
 
 table_close(part_rel, NoLock);

@@ -236,12 +236,12 @@ PGSharedMemoryCreate(Size size,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 errmsg("the processor does not support large pages")));
 ereport(DEBUG1,
-(errmsg("disabling huge pages")));
+(errmsg_internal("disabling huge pages")));
 }
 else if (!EnableLockPagesPrivilege(huge_pages == HUGE_PAGES_ON ? FATAL : DEBUG1))
 {
 ereport(DEBUG1,
-(errmsg("disabling huge pages")));
+(errmsg_internal("disabling huge pages")));
 }
 else
 {

@@ -443,7 +443,7 @@ AutoVacLauncherMain(int argc, char *argv[])
 init_ps_display(NULL);
 
 ereport(DEBUG1,
-(errmsg("autovacuum launcher started")));
+(errmsg_internal("autovacuum launcher started")));
 
 if (PostAuthDelay)
 pg_usleep(PostAuthDelay * 1000000L);
@@ -847,7 +847,7 @@ static void
 AutoVacLauncherShutdown(void)
 {
 ereport(DEBUG1,
-(errmsg("autovacuum launcher shutting down")));
+(errmsg_internal("autovacuum launcher shutting down")));
 AutoVacuumShmem->av_launcherpid = 0;
 
 proc_exit(0); /* done */
@@ -1703,7 +1703,7 @@ AutoVacWorkerMain(int argc, char *argv[])
 SetProcessingMode(NormalProcessing);
 set_ps_display(dbname);
 ereport(DEBUG1,
-(errmsg("autovacuum: processing database \"%s\"", dbname)));
+(errmsg_internal("autovacuum: processing database \"%s\"", dbname)));
 
 if (PostAuthDelay)
 pg_usleep(PostAuthDelay * 1000000L);

@@ -402,7 +402,7 @@ BackgroundWorkerStateChange(bool allow_new_workers)
 
 /* Log it! */
 ereport(DEBUG1,
-(errmsg("registering background worker \"%s\"",
+(errmsg_internal("registering background worker \"%s\"",
 rw->rw_worker.bgw_name)));
 
 slist_push_head(&BackgroundWorkerList, &rw->rw_lnode);
@@ -434,7 +434,7 @@ ForgetBackgroundWorker(slist_mutable_iter *cur)
 slot->in_use = false;
 
 ereport(DEBUG1,
-(errmsg("unregistering background worker \"%s\"",
+(errmsg_internal("unregistering background worker \"%s\"",
 rw->rw_worker.bgw_name)));
 
 slist_delete_current(cur);
@@ -897,7 +897,7 @@ RegisterBackgroundWorker(BackgroundWorker *worker)
 
 if (!IsUnderPostmaster)
 ereport(DEBUG1,
-(errmsg("registering background worker \"%s\"", worker->bgw_name)));
+(errmsg_internal("registering background worker \"%s\"", worker->bgw_name)));
 
 if (!process_shared_preload_libraries_in_progress &&
 strcmp(worker->bgw_library_name, "postgres") != 0)

@@ -1226,7 +1226,7 @@ CompactCheckpointerRequestQueue(void)
 CheckpointerShmem->requests[preserve_count++] = CheckpointerShmem->requests[n];
 }
 ereport(DEBUG1,
-(errmsg("compacted fsync request queue from %d entries to %d entries",
+(errmsg_internal("compacted fsync request queue from %d entries to %d entries",
 CheckpointerShmem->num_requests, preserve_count)));
 CheckpointerShmem->num_requests = preserve_count;
 

@@ -5787,7 +5787,7 @@ do_start_bgworker(RegisteredBgWorker *rw)
 }
 
 ereport(DEBUG1,
-(errmsg("starting background worker process \"%s\"",
+(errmsg_internal("starting background worker process \"%s\"",
 rw->rw_worker.bgw_name)));
 
 #ifdef EXEC_BACKEND

@@ -518,7 +518,7 @@ SysLoggerMain(int argc, char *argv[])
 * it DEBUG1 to suppress in normal use.
 */
 ereport(DEBUG1,
-(errmsg("logger shutting down")));
+(errmsg_internal("logger shutting down")));
 
 /*
 * Normal exit from the syslogger is here. Note that we

@@ -282,7 +282,7 @@ logicalrep_worker_launch(Oid dbid, Oid subid, const char *subname, Oid userid,
 TimestampTz now;
 
 ereport(DEBUG1,
-(errmsg("starting logical replication worker for subscription \"%s\"",
+(errmsg_internal("starting logical replication worker for subscription \"%s\"",
 subname)));
 
 /* Report this after the initial starting message for consistency. */
@@ -805,7 +805,7 @@ ApplyLauncherMain(Datum main_arg)
 TimestampTz last_start_time = 0;
 
 ereport(DEBUG1,
-(errmsg("logical replication launcher started")));
+(errmsg_internal("logical replication launcher started")));
 
 before_shmem_exit(logicalrep_launcher_onexit, (Datum) 0);
 

@@ -425,7 +425,7 @@ SyncRepInitConfig(void)
 SpinLockRelease(&MyWalSnd->mutex);
 
 ereport(DEBUG1,
-(errmsg("standby \"%s\" now has synchronous standby priority %u",
+(errmsg_internal("standby \"%s\" now has synchronous standby priority %u",
 application_name, priority)));
 }
 }

@@ -2319,7 +2319,7 @@ WalSndLoop(WalSndSendDataCallback send_data)
 if (MyWalSnd->state == WALSNDSTATE_CATCHUP)
 {
 ereport(DEBUG1,
-(errmsg("\"%s\" has now caught up with upstream server",
+(errmsg_internal("\"%s\" has now caught up with upstream server",
 application_name)));
 WalSndSetState(WALSNDSTATE_STREAMING);
 }

@@ -1602,7 +1602,7 @@ GetSafeSnapshot(Snapshot origSnapshot)
 /* else, need to retry... */
 ereport(DEBUG2,
 (errcode(ERRCODE_T_R_SERIALIZATION_FAILURE),
-errmsg("deferrable snapshot was unsafe; trying a new one")));
+errmsg_internal("deferrable snapshot was unsafe; trying a new one")));
 ReleasePredicateLocks(false, false);
 }
 

@@ -1413,13 +1413,13 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
 initStringInfo(&logbuf);
 DescribeLockTag(&locktagbuf, &locktag_copy);
 appendStringInfo(&logbuf,
-_("Process %d waits for %s on %s."),
+"Process %d waits for %s on %s.",
 MyProcPid,
 GetLockmodeName(lockmethod_copy, lockmode),
 locktagbuf.data);
 
 ereport(DEBUG1,
-(errmsg("sending cancel to blocking autovacuum PID %d",
+(errmsg_internal("sending cancel to blocking autovacuum PID %d",
 pid),
 errdetail_log("%s", logbuf.data)));
 

@@ -983,7 +983,7 @@ register_dirty_segment(SMgrRelation reln, ForkNumber forknum, MdfdVec *seg)
 if (!RegisterSyncRequest(&tag, SYNC_REQUEST, false /* retryOnError */ ))
 {
 ereport(DEBUG1,
-(errmsg("could not forward fsync request because request queue is full")));
+(errmsg_internal("could not forward fsync request because request queue is full")));
 
 if (FileSync(seg->mdfd_vfd, WAIT_EVENT_DATA_FILE_SYNC) < 0)
 ereport(data_sync_elevel(ERROR),

@@ -419,7 +419,7 @@ ProcessSyncRequests(void)
 else
 ereport(DEBUG1,
 (errcode_for_file_access(),
-errmsg("could not fsync file \"%s\" but retrying: %m",
+errmsg_internal("could not fsync file \"%s\" but retrying: %m",
 path)));
 
 /*

@@ -353,7 +353,7 @@ SocketBackend(StringInfo inBuf)
 whereToSendOutput = DestNone;
 ereport(DEBUG1,
 (errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
-errmsg("unexpected EOF on client connection")));
+errmsg_internal("unexpected EOF on client connection")));
 }
 return qtype;
 }
@@ -389,7 +389,7 @@ SocketBackend(StringInfo inBuf)
 whereToSendOutput = DestNone;
 ereport(DEBUG1,
 (errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
-errmsg("unexpected EOF on client connection")));
+errmsg_internal("unexpected EOF on client connection")));
 }
 return EOF;
 }
@@ -416,7 +416,7 @@ SocketBackend(StringInfo inBuf)
 whereToSendOutput = DestNone;
 ereport(DEBUG1,
 (errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
-errmsg("unexpected EOF on client connection")));
+errmsg_internal("unexpected EOF on client connection")));
 }
 return EOF;
 }
@@ -1375,7 +1375,7 @@ exec_parse_message(const char *query_string, /* string to execute */
 ResetUsage();
 
 ereport(DEBUG2,
-(errmsg("parse %s: %s",
+(errmsg_internal("parse %s: %s",
 *stmt_name ? stmt_name : "<unnamed>",
 query_string)));
 
@@ -1631,7 +1631,7 @@ exec_bind_message(StringInfo input_message)
 stmt_name = pq_getmsgstring(input_message);
 
 ereport(DEBUG2,
-(errmsg("bind %s to %s",
+(errmsg_internal("bind %s to %s",
 *portal_name ? portal_name : "<unnamed>",
 *stmt_name ? stmt_name : "<unnamed>")));
 
@@ -3092,7 +3092,7 @@ ProcessInterrupts(void)
 else if (IsLogicalLauncher())
 {
 ereport(DEBUG1,
-(errmsg("logical replication launcher shutting down")));
+(errmsg_internal("logical replication launcher shutting down")));
 
 /*
 * The logical replication launcher can be stopped at any time.

@@ -1641,7 +1641,7 @@ load_libraries(const char *libraries, const char *gucname, bool restricted)
 }
 load_file(filename, restricted);
 ereport(DEBUG1,
-(errmsg("loaded library \"%s\"", filename)));
+(errmsg_internal("loaded library \"%s\"", filename)));
 if (expanded)
 pfree(expanded);
 }