Remove TRACE_SORT macro

The TRACE_SORT macro guarded the availability of the trace_sort GUC
setting.  But it has been enabled by default ever since it was
introduced in PostgreSQL 8.1, and there have been no reports that
someone wanted to disable it.  So just remove the macro to simplify
things.  (For the avoidance of doubt: The trace_sort GUC is still
there.  This only removes the rarely-used macro guarding it.)

Reviewed-by: Heikki Linnakangas <hlinnaka@iki.fi>
Discussion: https://www.postgresql.org/message-id/flat/be5f7162-7c1d-44e3-9a78-74dcaa6529f2%40eisentraut.org
Author: Peter Eisentraut
Date: 2024-08-14 08:02:32 +02:00
parent bf3401fe81
commit c8e2d422fd
11 changed files with 0 additions and 95 deletions

View File

@ -11711,9 +11711,6 @@ dynamic_library_path = 'C:\tools\postgresql;H:\my_project\lib;$libdir'
<listitem>
<para>
If on, emit information about resource usage during sort operations.
This parameter is only available if the <symbol>TRACE_SORT</symbol> macro
was defined when <productname>PostgreSQL</productname> was compiled.
(However, <symbol>TRACE_SORT</symbol> is currently defined by default.)
</para>
</listitem>
</varlistentry>

View File

@ -430,13 +430,11 @@ macaddr_abbrev_abort(int memtupcount, SortSupport ssup)
*/
if (abbr_card > 100000.0)
{
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"macaddr_abbrev: estimation ends at cardinality %f"
" after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count, memtupcount);
#endif
uss->estimating = false;
return false;
}
@ -449,23 +447,19 @@ macaddr_abbrev_abort(int memtupcount, SortSupport ssup)
*/
if (abbr_card < uss->input_count / 2000.0 + 0.5)
{
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"macaddr_abbrev: aborting abbreviation at cardinality %f"
" below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count / 2000.0 + 0.5, uss->input_count,
memtupcount);
#endif
return true;
}
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"macaddr_abbrev: cardinality %f after " INT64_FORMAT
" values (%d rows)", abbr_card, uss->input_count, memtupcount);
#endif
return false;
}

View File

@ -503,13 +503,11 @@ network_abbrev_abort(int memtupcount, SortSupport ssup)
*/
if (abbr_card > 100000.0)
{
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"network_abbrev: estimation ends at cardinality %f"
" after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count, memtupcount);
#endif
uss->estimating = false;
return false;
}
@ -522,23 +520,19 @@ network_abbrev_abort(int memtupcount, SortSupport ssup)
*/
if (abbr_card < uss->input_count / 2000.0 + 0.5)
{
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"network_abbrev: aborting abbreviation at cardinality %f"
" below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count / 2000.0 + 0.5, uss->input_count,
memtupcount);
#endif
return true;
}
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"network_abbrev: cardinality %f after " INT64_FORMAT
" values (%d rows)", abbr_card, uss->input_count, memtupcount);
#endif
return false;
}

View File

@ -2127,13 +2127,11 @@ numeric_abbrev_abort(int memtupcount, SortSupport ssup)
*/
if (abbr_card > 100000.0)
{
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"numeric_abbrev: estimation ends at cardinality %f"
" after " INT64_FORMAT " values (%d rows)",
abbr_card, nss->input_count, memtupcount);
#endif
nss->estimating = false;
return false;
}
@ -2149,24 +2147,20 @@ numeric_abbrev_abort(int memtupcount, SortSupport ssup)
*/
if (abbr_card < nss->input_count / 10000.0 + 0.5)
{
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"numeric_abbrev: aborting abbreviation at cardinality %f"
" below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, nss->input_count / 10000.0 + 0.5,
nss->input_count, memtupcount);
#endif
return true;
}
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"numeric_abbrev: cardinality %f"
" after " INT64_FORMAT " values (%d rows)",
abbr_card, nss->input_count, memtupcount);
#endif
return false;
}

View File

@ -307,13 +307,11 @@ uuid_abbrev_abort(int memtupcount, SortSupport ssup)
*/
if (abbr_card > 100000.0)
{
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"uuid_abbrev: estimation ends at cardinality %f"
" after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count, memtupcount);
#endif
uss->estimating = false;
return false;
}
@ -326,23 +324,19 @@ uuid_abbrev_abort(int memtupcount, SortSupport ssup)
*/
if (abbr_card < uss->input_count / 2000.0 + 0.5)
{
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"uuid_abbrev: aborting abbreviation at cardinality %f"
" below threshold %f after " INT64_FORMAT " values (%d rows)",
abbr_card, uss->input_count / 2000.0 + 0.5, uss->input_count,
memtupcount);
#endif
return true;
}
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"uuid_abbrev: cardinality %f after " INT64_FORMAT
" values (%d rows)", abbr_card, uss->input_count, memtupcount);
#endif
return false;
}

View File

@ -2455,7 +2455,6 @@ varstr_abbrev_abort(int memtupcount, SortSupport ssup)
* time there are differences within full key strings not captured in
* abbreviations.
*/
#ifdef TRACE_SORT
if (trace_sort)
{
double norm_abbrev_card = abbrev_distinct / (double) memtupcount;
@ -2465,7 +2464,6 @@ varstr_abbrev_abort(int memtupcount, SortSupport ssup)
memtupcount, abbrev_distinct, key_distinct, norm_abbrev_card,
sss->prop_card);
}
#endif
/*
* If the number of distinct abbreviated keys approximately matches the
@ -2527,12 +2525,10 @@ varstr_abbrev_abort(int memtupcount, SortSupport ssup)
* of moderately high to high abbreviated cardinality. There is little to
* lose but much to gain, which our strategy reflects.
*/
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "varstr_abbrev: aborted abbreviation at %d "
"(abbrev_distinct: %f, key_distinct: %f, prop_card: %f)",
memtupcount, abbrev_distinct, key_distinct, sss->prop_card);
#endif
return true;
}

View File

@ -1698,7 +1698,6 @@ struct config_bool ConfigureNamesBool[] =
NULL, NULL, NULL
},
#ifdef TRACE_SORT
{
{"trace_sort", PGC_USERSET, DEVELOPER_OPTIONS,
gettext_noop("Emit information about resource usage in sorting."),
@ -1709,7 +1708,6 @@ struct config_bool ConfigureNamesBool[] =
false,
NULL, NULL, NULL
},
#endif
#ifdef TRACE_SYNCSCAN
/* this is undocumented because not exposed in a standard build */

View File

@ -121,9 +121,7 @@
ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)
/* GUC variables */
#ifdef TRACE_SORT
bool trace_sort = false;
#endif
#ifdef DEBUG_BOUNDED_SORT
bool optimize_bounded_sort = true;
@ -335,9 +333,7 @@ struct Tuplesortstate
/*
* Resource snapshot for time of sort start.
*/
#ifdef TRACE_SORT
PGRUsage ru_start;
#endif
};
/*
@ -683,10 +679,8 @@ tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt)
state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));
#ifdef TRACE_SORT
if (trace_sort)
pg_rusage_init(&state->ru_start);
#endif
state->base.sortopt = sortopt;
state->base.tuples = true;
@ -904,22 +898,16 @@ tuplesort_free(Tuplesortstate *state)
{
/* context swap probably not needed, but let's be safe */
MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
#ifdef TRACE_SORT
int64 spaceUsed;
if (state->tapeset)
spaceUsed = LogicalTapeSetBlocks(state->tapeset);
else
spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
#endif
/*
* Delete temporary "tape" files, if any.
*
* Note: want to include this in reported total cost of sort, hence need
* for two #ifdef TRACE_SORT sections.
*
* We don't bother to destroy the individual tapes here. They will go away
* with the sortcontext. (In TSS_FINALMERGE state, we have closed
* finished tapes already.)
@ -927,7 +915,6 @@ tuplesort_free(Tuplesortstate *state)
if (state->tapeset)
LogicalTapeSetClose(state->tapeset);
#ifdef TRACE_SORT
if (trace_sort)
{
if (state->tapeset)
@ -941,14 +928,6 @@ tuplesort_free(Tuplesortstate *state)
}
TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
#else
/*
* If you disabled TRACE_SORT, you can still probe sort__done, but you
* ain't getting space-used stats.
*/
TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
#endif
FREESTATE(state);
MemoryContextSwitchTo(oldcontext);
@ -1263,12 +1242,10 @@ tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple,
(state->memtupcount > state->bound * 2 ||
(state->memtupcount > state->bound && LACKMEM(state))))
{
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "switching to bounded heapsort at %d tuples: %s",
state->memtupcount,
pg_rusage_show(&state->ru_start));
#endif
make_bounded_heap(state);
MemoryContextSwitchTo(oldcontext);
return;
@ -1387,11 +1364,9 @@ tuplesort_performsort(Tuplesortstate *state)
{
MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "performsort of worker %d starting: %s",
state->worker, pg_rusage_show(&state->ru_start));
#endif
switch (state->status)
{
@ -1470,7 +1445,6 @@ tuplesort_performsort(Tuplesortstate *state)
break;
}
#ifdef TRACE_SORT
if (trace_sort)
{
if (state->status == TSS_FINALMERGE)
@ -1481,7 +1455,6 @@ tuplesort_performsort(Tuplesortstate *state)
elog(LOG, "performsort of worker %d done: %s",
state->worker, pg_rusage_show(&state->ru_start));
}
#endif
MemoryContextSwitchTo(oldcontext);
}
@ -1905,11 +1878,9 @@ inittapes(Tuplesortstate *state, bool mergeruns)
state->maxTapes = MINORDER;
}
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "worker %d switching to external sort with %d tapes: %s",
state->worker, state->maxTapes, pg_rusage_show(&state->ru_start));
#endif
/* Create the tape set */
inittapestate(state, state->maxTapes);
@ -2118,11 +2089,9 @@ mergeruns(Tuplesortstate *state)
*/
state->tape_buffer_mem = state->availMem;
USEMEM(state, state->tape_buffer_mem);
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "worker %d using %zu KB of memory for tape buffers",
state->worker, state->tape_buffer_mem / 1024);
#endif
for (;;)
{
@ -2167,12 +2136,10 @@ mergeruns(Tuplesortstate *state)
state->nInputRuns,
state->maxTapes);
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "starting merge pass of %d input runs on %d tapes, " INT64_FORMAT " KB of memory for each input tape: %s",
state->nInputRuns, state->nInputTapes, input_buffer_size / 1024,
pg_rusage_show(&state->ru_start));
#endif
/* Prepare the new input tapes for merge pass. */
for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
@ -2378,12 +2345,10 @@ dumptuples(Tuplesortstate *state, bool alltuples)
state->currentRun++;
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "worker %d starting quicksort of run %d: %s",
state->worker, state->currentRun,
pg_rusage_show(&state->ru_start));
#endif
/*
* Sort all tuples accumulated within the allowed amount of memory for
@ -2391,12 +2356,10 @@ dumptuples(Tuplesortstate *state, bool alltuples)
*/
tuplesort_sort_memtuples(state);
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "worker %d finished quicksort of run %d: %s",
state->worker, state->currentRun,
pg_rusage_show(&state->ru_start));
#endif
memtupwrite = state->memtupcount;
for (i = 0; i < memtupwrite; i++)
@ -2426,12 +2389,10 @@ dumptuples(Tuplesortstate *state, bool alltuples)
markrunend(state->destTape);
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG, "worker %d finished writing run %d to tape %d: %s",
state->worker, state->currentRun, (state->currentRun - 1) % state->nOutputTapes + 1,
pg_rusage_show(&state->ru_start));
#endif
}
/*

View File

@ -181,12 +181,10 @@ tuplesort_begin_heap(TupleDesc tupDesc,
Assert(nkeys > 0);
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
#endif
base->nKeys = nkeys;
@ -258,13 +256,11 @@ tuplesort_begin_cluster(TupleDesc tupDesc,
oldcontext = MemoryContextSwitchTo(base->maincontext);
arg = (TuplesortClusterArg *) palloc0(sizeof(TuplesortClusterArg));
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
RelationGetNumberOfAttributes(indexRel),
workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
#endif
base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);
@ -368,13 +364,11 @@ tuplesort_begin_index_btree(Relation heapRel,
oldcontext = MemoryContextSwitchTo(base->maincontext);
arg = (TuplesortIndexBTreeArg *) palloc(sizeof(TuplesortIndexBTreeArg));
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin index sort: unique = %c, workMem = %d, randomAccess = %c",
enforceUnique ? 't' : 'f',
workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
#endif
base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);
@ -452,7 +446,6 @@ tuplesort_begin_index_hash(Relation heapRel,
oldcontext = MemoryContextSwitchTo(base->maincontext);
arg = (TuplesortIndexHashArg *) palloc(sizeof(TuplesortIndexHashArg));
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin index sort: high_mask = 0x%x, low_mask = 0x%x, "
@ -462,7 +455,6 @@ tuplesort_begin_index_hash(Relation heapRel,
max_buckets,
workMem,
sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
#endif
base->nKeys = 1; /* Only one sort column, the hash code */
@ -503,12 +495,10 @@ tuplesort_begin_index_gist(Relation heapRel,
oldcontext = MemoryContextSwitchTo(base->maincontext);
arg = (TuplesortIndexBTreeArg *) palloc(sizeof(TuplesortIndexBTreeArg));
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin index sort: workMem = %d, randomAccess = %c",
workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
#endif
base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);
@ -560,13 +550,11 @@ tuplesort_begin_index_brin(int workMem,
sortopt);
TuplesortPublic *base = TuplesortstateGetPublic(state);
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin index sort: workMem = %d, randomAccess = %c",
workMem,
sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
#endif
base->nKeys = 1; /* Only one sort column, the block number */
@ -596,12 +584,10 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
oldcontext = MemoryContextSwitchTo(base->maincontext);
arg = (TuplesortDatumArg *) palloc(sizeof(TuplesortDatumArg));
#ifdef TRACE_SORT
if (trace_sort)
elog(LOG,
"begin datum sort: workMem = %d, randomAccess = %c",
workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
#endif
base->nKeys = 1; /* always a one-column sort */

View File

@ -358,12 +358,6 @@
*/
/* #define WAL_DEBUG */
/*
* Enable tracing of resource consumption during sort operations;
* see also the trace_sort GUC var. For 8.1 this is enabled by default.
*/
#define TRACE_SORT 1
/*
* Enable tracing of syncscan operations (see also the trace_syncscan GUC var).
*/

View File

@ -295,10 +295,7 @@ extern PGDLLIMPORT int tcp_user_timeout;
extern PGDLLIMPORT char *role_string;
extern PGDLLIMPORT bool in_hot_standby_guc;
#ifdef TRACE_SORT
extern PGDLLIMPORT bool trace_sort;
#endif
#ifdef DEBUG_BOUNDED_SORT
extern PGDLLIMPORT bool optimize_bounded_sort;