Mirror of https://git.postgresql.org/git/postgresql.git (synced 2024-11-27 07:21:09 +08:00)
Fix many typos and inconsistencies
Author: Alexander Lakhin
Discussion: https://postgr.es/m/af27d1b3-a128-9d62-46e0-88f424397f44@gmail.com
parent 459c3cdb4a
commit c74d49d41c
@@ -341,7 +341,7 @@ BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple)
 /*
  * Allocate a new page (either by recycling, or by extending the index file)
  * The returned buffer is already pinned and exclusive-locked
- * Caller is responsible for initializing the page by calling BloomInitBuffer
+ * Caller is responsible for initializing the page by calling BloomInitPage
  */
 Buffer
 BloomNewBuffer(Relation index)
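
The contract stated in that comment implies caller code of roughly the following shape (a hedged sketch of a hypothetical caller, not code from contrib/bloom; the flags argument to BloomInitPage is assumed):

    Buffer      buffer = BloomNewBuffer(index);   /* already pinned + exclusive-locked */
    Page        page = BufferGetPage(buffer);

    BloomInitPage(page, 0);         /* the caller, not BloomNewBuffer, initializes */
    /* ... add tuples to the page ... */
    MarkBufferDirty(buffer);
    UnlockReleaseBuffer(buffer);    /* drop the lock and pin when done */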
@@ -43,7 +43,7 @@ static int compare_val_int4(const void *a, const void *b);
  *
  * The default array selectivity operators for the @>, && and @< operators
  * work fine for integer arrays. However, if we tried to just use arraycontsel
- * and arracontjoinsel directly as the cost estimator functions for our
+ * and arraycontjoinsel directly as the cost estimator functions for our
  * operators, they would not work as intended, because they look at the
  * operator's OID. Our operators behave exactly like the built-in anyarray
  * versions, but we must tell the cost estimator functions which built-in

@@ -441,9 +441,9 @@ typedef struct
 struct TrgmPackedGraph
 {
     /*
-     * colorTrigramsCount and colorTrigramsGroups contain information about
-     * how trigrams are grouped into color trigrams. "colorTrigramsCount" is
-     * the count of color trigrams and "colorTrigramGroups" contains number of
+     * colorTrigramsCount and colorTrigramGroups contain information about how
+     * trigrams are grouped into color trigrams. "colorTrigramsCount" is the
+     * count of color trigrams and "colorTrigramGroups" contains number of
      * simple trigrams for each color trigram. The array of simple trigrams
      * (stored separately from this struct) is ordered so that the simple
      * trigrams for each color trigram are consecutive, and they're in order
@@ -11,7 +11,7 @@
  * binaries of libcrypt exportable from the USA
  *
  * Adapted for FreeBSD-4.0 by Mark R V Murray
- * this file should now *only* export crypt_des(), in order to make
+ * this file should now *only* export px_crypt_des(), in order to make
  * a module that can be optionally included in libcrypt.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -665,7 +665,7 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1
    Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = ANY (ARRAY[c2, 1, ("C 1" + 0)])))
 (3 rows)
 
-EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- ArrayRef
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- SubscriptingRef
                                                       QUERY PLAN
 ----------------------------------------------------------------------------------------------------------------------
  Foreign Scan on public.ft1 t1

@@ -297,7 +297,7 @@ EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = -c1; -- Op
 EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE 1 = c1!; -- OpExpr(r)
 EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE (c1 IS NOT NULL) IS DISTINCT FROM (c1 IS NOT NULL); -- DistinctExpr
 EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = ANY(ARRAY[c2, 1, c1 + 0]); -- ScalarArrayOpExpr
-EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- ArrayRef
+EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c1 = (ARRAY[c1,c2,3])[1]; -- SubscriptingRef
 EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c6 = E'foo''s\\bar'; -- check special chars
 EXPLAIN (VERBOSE, COSTS OFF) SELECT * FROM ft1 t1 WHERE c8 = 'foo'; -- can't be sent to remote
 -- parameterized remote path for foreign table
@@ -87,9 +87,9 @@ WHERE c.relname = 'customer' AND
       c2.oid = i.indexrelid
 ORDER BY c2.relname;
 
-        relname        | relpages
-----------------------+----------
- customer_id_indexdex |       26
+      relname       | relpages
+-------------------+----------
+ customer_id_index |       26
 </programlisting>
 </para>
@@ -1,5 +1,5 @@
 /*
- * brin_tuples.c
+ * brin_tuple.c
  *      Method implementations for tuples in BRIN indexes.
  *
  * Intended usage is that code outside this file only deals with
@@ -207,7 +207,7 @@ brin_form_tuple(BrinDesc *brdesc, BlockNumber blkno, BrinMemTuple *tuple,
     /*
      * Note that we reverse the sense of null bits in this module: we
      * store a 1 for a null attribute rather than a 0. So we must reverse
-     * the sense of the att_isnull test in br_deconstruct_tuple as well.
+     * the sense of the att_isnull test in brin_deconstruct_tuple as well.
      */
     bitP = ((bits8 *) ((char *) rettuple + SizeOfBrinTuple)) - 1;
     bitmask = HIGHBIT;
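
The convention this comment describes amounts to the following (a self-contained sketch with hypothetical helper names, not the actual brin_tuple.c code):

    #include <stdint.h>
    #include <stdbool.h>

    typedef uint8_t bits8;

    /* BRIN stores 1 = null; heap null bitmaps store 1 = value present. */
    static inline void
    brin_set_null_sketch(bits8 *bits, int attno)
    {
        bits[attno / 8] |= (bits8) (1 << (attno % 8));
    }

    static inline bool
    brin_att_isnull_sketch(const bits8 *bits, int attno)
    {
        /* note the reversed sense versus the core att_isnull() test */
        return (bits[attno / 8] & (1 << (attno % 8))) != 0;
    }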
@@ -621,8 +621,8 @@ btparallelrescan(IndexScanDesc scan)
 
 /*
  * _bt_parallel_seize() -- Begin the process of advancing the scan to a new
- * page.  Other scans must wait until we call bt_parallel_release() or
- * bt_parallel_done().
+ * page.  Other scans must wait until we call _bt_parallel_release()
+ * or _bt_parallel_done().
  *
  * The return value is true if we successfully seized the scan and false
  * if we did not.  The latter case occurs if no pages remain for the current

@@ -181,7 +181,7 @@ btree_xlog_insert(bool isleaf, bool ismeta, XLogReaderState *record)
 
     if (PageAddItem(page, (Item) datapos, datalen, xlrec->offnum,
                     false, false) == InvalidOffsetNumber)
-        elog(PANIC, "btree_insert_redo: failed to add item");
+        elog(PANIC, "btree_xlog_insert: failed to add item");
 
     PageSetLSN(page, lsn);
     MarkBufferDirty(buffer);
@@ -1588,9 +1588,8 @@ RemoveAttributeById(Oid relid, AttrNumber attnum)
     /*
      * Grab an exclusive lock on the target table, which we will NOT release
      * until end of transaction.  (In the simple case where we are directly
-     * dropping this column, AlterTableDropColumn already did this ... but
-     * when cascading from a drop of some other object, we may not have any
-     * lock.)
+     * dropping this column, ATExecDropColumn already did this ... but when
+     * cascading from a drop of some other object, we may not have any lock.)
      */
     rel = relation_open(relid, AccessExclusiveLock);
 
@@ -1200,12 +1200,12 @@ ExecInitExprRec(Expr *node, ExprState *state,
                  * field's values[]/nulls[] entries as both the caseval
                  * source and the result address for this subexpression.
                  * That's okay only because (1) both FieldStore and
-                 * ArrayRef evaluate their arg or refexpr inputs first,
-                 * and (2) any such CaseTestExpr is directly the arg or
-                 * refexpr input.  So any read of the caseval will occur
-                 * before there's a chance to overwrite it.  Also, if
-                 * multiple entries in the newvals/fieldnums lists target
-                 * the same field, they'll effectively be applied
+                 * SubscriptingRef evaluate their arg or refexpr inputs
+                 * first, and (2) any such CaseTestExpr is directly the
+                 * arg or refexpr input.  So any read of the caseval will
+                 * occur before there's a chance to overwrite it.  Also,
+                 * if multiple entries in the newvals/fieldnums lists
+                 * target the same field, they'll effectively be applied
                  * left-to-right which is what we want.
                  */
                 save_innermost_caseval = state->innermost_caseval;
@@ -62,7 +62,7 @@
  * A non-null entry is a pointer to a LargeObjectDesc allocated in the
  * LO private memory context "fscxt".  The cookies array itself is also
  * dynamically allocated in that context.  Its current allocated size is
- * cookies_len entries, of which any unused entries will be NULL.
+ * cookies_size entries, of which any unused entries will be NULL.
  */
 static LargeObjectDesc **cookies = NULL;
 static int  cookies_size = 0;

@@ -934,7 +934,7 @@ tbm_extract_page_tuple(PagetableEntry *page, TBMIterateResult *output)
 }
 
 /*
- * tbm_advance_schunkbit - Advance the chunkbit
+ * tbm_advance_schunkbit - Advance the schunkbit
  */
 static inline void
 tbm_advance_schunkbit(PagetableEntry *chunk, int *schunkbitp)
@@ -929,11 +929,10 @@ generate_base_implied_equalities_no_const(PlannerInfo *root,
     /*
      * We scan the EC members once and track the last-seen member for each
      * base relation.  When we see another member of the same base relation,
-     * we generate "prev_mem = cur_mem".  This results in the minimum number
-     * of derived clauses, but it's possible that it will fail when a
-     * different ordering would succeed.  XXX FIXME: use a UNION-FIND
-     * algorithm similar to the way we build merged ECs.  (Use a list-of-lists
-     * for each rel.)
+     * we generate "prev_em = cur_em".  This results in the minimum number of
+     * derived clauses, but it's possible that it will fail when a different
+     * ordering would succeed.  XXX FIXME: use a UNION-FIND algorithm similar
+     * to the way we build merged ECs.  (Use a list-of-lists for each rel.)
      */
     prev_ems = (EquivalenceMember **)
         palloc0(root->simple_rel_array_size * sizeof(EquivalenceMember *));
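
The chaining scheme that comment describes links each member to the previously seen member of the same base relation, yielding N-1 clauses for N members of one relation; roughly (a simplified sketch with hypothetical names, not the planner's actual code):

    /* prev_ems[relid] holds the last-seen member of each base relation */
    for (int i = 0; i < nmembers; i++)
    {
        int     relid = member_relid(&members[i]);   /* hypothetical accessor */

        if (prev_ems[relid] != NULL)
            emit_equality_clause(prev_ems[relid], &members[i]);  /* "prev_em = cur_em" */
        prev_ems[relid] = &members[i];
    }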
@@ -3911,7 +3911,7 @@ create_foreignscan_plan(PlannerInfo *root, ForeignPath *best_path,
 }
 
 /*
- * create_custom_plan
+ * create_customscan_plan
  *
  * Transform a CustomPath into a Plan.
  */

@@ -3409,10 +3409,10 @@ eval_const_expressions_mutator(Node *node,
             {
                 /*
                  * This case could be folded into the generic handling used
-                 * for ArrayRef etc.  But because the simplification logic is
-                 * so trivial, applying evaluate_expr() to perform it would be
-                 * a heavy overhead.  BooleanTest is probably common enough to
-                 * justify keeping this bespoke implementation.
+                 * for SubscriptingRef etc.  But because the simplification
+                 * logic is so trivial, applying evaluate_expr() to perform it
+                 * would be a heavy overhead.  BooleanTest is probably common
+                 * enough to justify keeping this bespoke implementation.
                  */
                 BooleanTest *btest = (BooleanTest *) node;
                 BooleanTest *newbtest;
@@ -2082,7 +2082,7 @@ transformSetOperationTree(ParseState *pstate, SelectStmt *stmt,
              * Select common collation.  A common collation is required for
              * all set operators except UNION ALL; see SQL:2008 7.13 <query
              * expression> Syntax Rule 15c.  (If we fail to identify a common
-             * collation for a UNION ALL column, the curCollations element
+             * collation for a UNION ALL column, the colCollations element
              * will be set to InvalidOid, which may result in a runtime error
              * if something at a higher query level wants to use the column's
              * collation.)

@@ -12542,7 +12542,7 @@ SimpleTypename:
  * Note that ConstInterval is not included here since it must
  * be pushed up higher in the rules to accommodate the postfix
  * options (e.g. INTERVAL '1' YEAR).  Likewise, we have to handle
- * the generic-type-name case in AExprConst to avoid premature
+ * the generic-type-name case in AexprConst to avoid premature
  * reduce/reduce conflicts against function names.
  */
 ConstTypename:
@@ -695,9 +695,9 @@ transformAssignmentIndirection(ParseState *pstate,
     /*
      * Set up a substitution.  We abuse CaseTestExpr for this.  It's safe
      * to do so because the only nodes that will be above the CaseTestExpr
-     * in the finished expression will be FieldStore and ArrayRef nodes.
-     * (There could be other stuff in the tree, but it will be within
-     * other child fields of those node types.)
+     * in the finished expression will be FieldStore and SubscriptingRef
+     * nodes.  (There could be other stuff in the tree, but it will be
+     * within other child fields of those node types.)
      */
     CaseTestExpr *ctest = makeNode(CaseTestExpr);
 

@@ -525,7 +525,7 @@ ResetBackgroundWorkerCrashTimes(void)
     if (rw->rw_worker.bgw_restart_time == BGW_NEVER_RESTART)
     {
         /*
-         * Workers marked BGW_NVER_RESTART shouldn't get relaunched after
+         * Workers marked BGW_NEVER_RESTART shouldn't get relaunched after
          * the crash, so forget about them.  (If we wait until after the
          * crash to forget about them, and they are parallel workers,
          * parallel_terminate_count will get incremented after we've
@@ -114,7 +114,7 @@ CheckLogicalDecodingRequirements(void)
 }
 
 /*
- * Helper function for CreateInitialDecodingContext() and
+ * Helper function for CreateInitDecodingContext() and
  * CreateDecodingContext() performing common tasks.
  */
 static LogicalDecodingContext *

@@ -1334,7 +1334,9 @@ SaveSlotToPath(ReplicationSlot *slot, const char *dir, int elevel)
         return;
     }
 
-    /* Check CreateSlot() for the reasoning of using a crit. section. */
+    /*
+     * Check CreateSlotOnDisk() for the reasoning of using a critical section.
+     */
     START_CRIT_SECTION();
 
     fsync_fname(path, false);
@@ -980,11 +980,11 @@ process_matched_tle(TargetEntry *src_tle,
  *
  * As a further complication, the destination column might be a domain,
  * resulting in each assignment containing a CoerceToDomain node over a
- * FieldStore or ArrayRef.  These should have matching target domains,
- * so we strip them and reconstitute a single CoerceToDomain over the
- * combined FieldStore/ArrayRef nodes.  (Notice that this has the result
- * that the domain's checks are applied only after we do all the field or
- * element updates, not after each one.  This is arguably desirable.)
+ * FieldStore or SubscriptingRef.  These should have matching target
+ * domains, so we strip them and reconstitute a single CoerceToDomain over
+ * the combined FieldStore/SubscriptingRef nodes.  (Notice that this has the
+ * result that the domain's checks are applied only after we do all the
+ * field or element updates, not after each one.  This is arguably desirable.)
  *----------
  */
 src_expr = (Node *) src_tle->expr;
@@ -220,7 +220,7 @@ StrategyGetBuffer(BufferAccessStrategy strategy, uint32 *buf_state)
      * If asked, we need to waken the bgwriter. Since we don't want to rely on
      * a spinlock for this we force a read from shared memory once, and then
      * set the latch based on that value. We need to go through that length
-     * because otherwise bgprocno might be reset while/after we check because
+     * because otherwise bgwprocno might be reset while/after we check because
      * the compiler might just reread from memory.
      *
      * This can possibly set the latch of the wrong process if the bgwriter
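
The technique that comment describes is to copy the shared field into a local through a volatile-qualified read, so the compiler cannot reread it between the test and the use; roughly (a hedged sketch of the idiom, simplified from the surrounding function):

    /* force exactly one read from shared memory */
    int     bgwprocno = *((volatile int *) &StrategyControl->bgwprocno);

    if (bgwprocno != -1)
    {
        /* bgwprocno is a local copy, so it cannot change under us now */
        StrategyControl->bgwprocno = -1;
        SetLatch(&ProcGlobal->allProcs[bgwprocno].procLatch);
    }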
@@ -537,7 +537,7 @@ GetLocalBufferStorage(void)
 /*
  * CheckForLocalBufferLeaks - ensure this backend holds no local buffer pins
  *
- * This is just like CheckBufferLeaks(), but for local buffers.
+ * This is just like CheckForBufferLeaks(), but for local buffers.
  */
 static void
 CheckForLocalBufferLeaks(void)

@@ -226,9 +226,9 @@ BarrierAttach(Barrier *barrier)
 }
 
 /*
- * Detach from a barrier.  This may release other waiters from BarrierWait and
- * advance the phase if they were only waiting for this backend.  Return true
- * if this participant was the last to detach.
+ * Detach from a barrier.  This may release other waiters from
+ * BarrierArriveAndWait() and advance the phase if they were only waiting for
+ * this backend.  Return true if this participant was the last to detach.
  */
 bool
 BarrierDetach(Barrier *barrier)
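
For context, the attach/arrive/detach protocol these functions form is typically used like this (a hedged usage sketch; do_phase_work() is hypothetical, and the wait-event argument is only an example):

    BarrierAttach(barrier);             /* join; returns the current phase */
    while (do_phase_work(barrier))
    {
        /* the last participant to arrive advances the phase, releasing the rest */
        BarrierArriveAndWait(barrier, WAIT_EVENT_PARALLEL_FINISH);
    }
    BarrierDetach(barrier);             /* may itself release waiters, per the comment */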
@@ -14,7 +14,7 @@
 #
 #-------------------------------------------------------------------------
 
-# turn off perlcritic for autogened code
+# turn off perlcritic for autogenerated code
 ## no critic
 
 $0 =~ s/^.*?(\w+)[\.\w+]*$/$1/;

@@ -1322,7 +1322,7 @@ array_recv(PG_FUNCTION_ARGS)
         lBound[i] = pq_getmsgint(buf, 4);
 
         /*
-         * Check overflow of upper bound.  (ArrayNItems() below checks that
+         * Check overflow of upper bound.  (ArrayGetNItems() below checks that
          * dim[i] >= 0)
         */
         if (dim[i] != 0)
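
The overflow being checked is that lBound[i] + dim[i] - 1, the array's upper bound, still fits in an int; a sketch of such a check (an illustration of the idea, not necessarily the exact code that follows this hunk):

    if (dim[i] != 0)
    {
        /* reject dimensions whose upper bound would exceed INT_MAX */
        if (lBound[i] > INT_MAX - (dim[i] - 1))
            ereport(ERROR,
                    (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE),
                     errmsg("integer out of range")));
    }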
@@ -1374,7 +1374,7 @@ time_scale(PG_FUNCTION_ARGS)
 
 /* AdjustTimeForTypmod()
  * Force the precision of the time value to a specified value.
- * Uses *exactly* the same code as in AdjustTimestampForTypemod()
+ * Uses *exactly* the same code as in AdjustTimestampForTypmod()
  * but we make a separate copy because those types do not
  * have a fundamental tie together but rather a coincidence of
  * implementation. - thomas
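
Forcing a time value to a given precision amounts to rounding its microsecond representation to a power-of-ten unit derived from the typmod; a simplified sketch (assumes a 64-bit microsecond value and non-negative input, unlike the real function):

    static int64
    adjust_time_sketch(int64 usec, int32 typmod)
    {
        /* scale[p] = microseconds per unit at precision p (0..6) */
        static const int64 scale[] = {1000000, 100000, 10000, 1000, 100, 10, 1};

        if (typmod >= 0 && typmod <= 6)
            usec = (usec + scale[typmod] / 2) / scale[typmod] * scale[typmod];
        return usec;
    }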
@@ -3029,7 +3029,7 @@ DecodeSpecial(int field, char *lowtoken, int *val)
 }
 
 
-/* ClearPgTM
+/* ClearPgTm
  *
  * Zero out a pg_tm and associated fsec_t
  */

@@ -527,7 +527,7 @@ dotrim(const char *string, int stringlen,
  *
  * Syntax:
  *
- * bytea byteatrim(byta string, bytea set)
+ * bytea byteatrim(bytea string, bytea set)
  *
  * Purpose:
  *
src/backend/utils/cache/plancache.c
@@ -504,7 +504,7 @@ DropCachedPlan(CachedPlanSource *plansource)
         plansource->is_saved = false;
     }
 
-    /* Decrement generic CachePlan's refcount and drop if no longer needed */
+    /* Decrement generic CachedPlan's refcount and drop if no longer needed */
     ReleaseGenericPlan(plansource);
 
     /* Mark it no longer valid */

@@ -2258,7 +2258,7 @@ check_for_freed_segments(dsa_area *area)
 }
 
 /*
- * Workhorse for check_for_free_segments(), and also used directly in path
+ * Workhorse for check_for_freed_segments(), and also used directly in path
  * where the area lock is already held. This should be called after acquiring
  * the lock but before looking up any segment by index number, to make sure we
  * unmap any stale segments that might have previously had the same index as a
@@ -231,7 +231,7 @@ FreePageManagerGet(FreePageManager *fpm, Size npages, Size *first_page)
 
     /*
      * FreePageManagerGetInternal may have set contiguous_pages_dirty.
-     * Recompute contigous_pages if so.
+     * Recompute contiguous_pages if so.
      */
     FreePageManagerUpdateLargest(fpm);
 
@@ -55,7 +55,7 @@ typedef int16 AttrNumber;
 )
 
 /*
- * AttributeOffsetGetAttributeNumber
+ * AttrOffsetGetAttrNumber
  *      Returns the attribute number for an attribute offset.
  */
 #define AttrOffsetGetAttrNumber(attributeOffset) \
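
For reference, attribute numbers are 1-based while offsets are 0-based, so the macro body, which this hunk truncates, presumably completes along these lines (a sketch, not the line from the actual header):

    #define AttrOffsetGetAttrNumber(attributeOffset) \
        ((AttrNumber) (1 + (attributeOffset)))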
@@ -457,7 +457,7 @@ extern void hashbucketcleanup(Relation rel, Bucket cur_bucket,
                               BufferAccessStrategy bstrategy,
                               uint32 maxbucket, uint32 highmask, uint32 lowmask,
                               double *tuples_removed, double *num_index_tuples,
-                              bool bucket_has_garbage,
+                              bool split_cleanup,
                               IndexBulkDeleteCallback callback, void *callback_state);
 
 #endif                          /* HASH_H */

@@ -40,7 +40,7 @@ typedef struct EventTriggerData
 ((fcinfo)->context != NULL && IsA((fcinfo)->context, EventTriggerData))
 
 extern Oid CreateEventTrigger(CreateEventTrigStmt *stmt);
-extern void RemoveEventTriggerById(Oid ctrigOid);
+extern void RemoveEventTriggerById(Oid trigOid);
 extern Oid get_event_trigger_oid(const char *trigname, bool missing_ok);
 
 extern Oid AlterEventTrigger(AlterEventTrigStmt *stmt);
@@ -51,6 +51,4 @@ extern char *get_extension_name(Oid ext_oid);
 extern ObjectAddress AlterExtensionNamespace(const char *extensionName, const char *newschema,
                                              Oid *oldschema);
 
-extern void AlterExtensionOwner_oid(Oid extensionOid, Oid newOwnerId);
-
 #endif                          /* EXTENSION_H */

@@ -18,7 +18,7 @@
 /*
  * General executor code
  */
-extern CustomScanState *ExecInitCustomScan(CustomScan *custom_scan,
+extern CustomScanState *ExecInitCustomScan(CustomScan *cscan,
                                            EState *estate, int eflags);
 extern void ExecEndCustomScan(CustomScanState *node);
 
@@ -252,7 +252,7 @@ extern Datum HeapTupleHeaderGetDatum(HeapTupleHeader tuple);
  *      oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
  *      <user defined code>
  *      <if returning composite>
- *          <build TupleDesc, and perhaps AttInMetaData>
+ *          <build TupleDesc, and perhaps AttInMetadata>
  *      <endif returning composite>
  *      <user defined code>
  *      // return to original context when allocating transient memory
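
The skeleton this header documents is the value-per-call set-returning-function protocol; a minimal scalar-returning example in that style (a sketch that returns 0..n-1 and skips the composite branch and argument checks):

    #include "postgres.h"
    #include "fmgr.h"
    #include "funcapi.h"

    PG_MODULE_MAGIC;

    PG_FUNCTION_INFO_V1(count_up);

    Datum
    count_up(PG_FUNCTION_ARGS)
    {
        FuncCallContext *funcctx;

        if (SRF_IS_FIRSTCALL())
        {
            /* one-time setup; cross-call state lives in multi_call_memory_ctx */
            funcctx = SRF_FIRSTCALL_INIT();
            funcctx->max_calls = PG_GETARG_INT32(0);
        }

        funcctx = SRF_PERCALL_SETUP();

        if (funcctx->call_cntr < funcctx->max_calls)
            SRF_RETURN_NEXT(funcctx, Int32GetDatum((int32) funcctx->call_cntr));

        SRF_RETURN_DONE(funcctx);
    }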
@@ -940,7 +940,7 @@ typedef struct CaseWhen
  * We also abuse this node type for some other purposes, including:
  *  * Placeholder for the current array element value in ArrayCoerceExpr;
  *    see build_coercion_expression().
- *  * Nested FieldStore/ArrayRef assignment expressions in INSERT/UPDATE;
+ *  * Nested FieldStore/SubscriptingRef assignment expressions in INSERT/UPDATE;
  *    see transformAssignmentIndirection().
  *
  * The uses in CaseExpr and ArrayCoerceExpr are safe only to the extent that

@@ -950,7 +950,7 @@ typedef struct CaseWhen
  * break it.
  *
  * The nested-assignment-expression case is safe because the only node types
- * that can be above such CaseTestExprs are FieldStore and ArrayRef.
+ * that can be above such CaseTestExprs are FieldStore and SubscriptingRef.
  */
 typedef struct CaseTestExpr
 {
@@ -86,8 +86,6 @@ extern void cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
                               RelOptInfo *baserel, ParamPathInfo *param_info);
 extern void cost_functionscan(Path *path, PlannerInfo *root,
                               RelOptInfo *baserel, ParamPathInfo *param_info);
-extern void cost_tableexprscan(Path *path, PlannerInfo *root,
-                               RelOptInfo *baserel, ParamPathInfo *param_info);
 extern void cost_valuesscan(Path *path, PlannerInfo *root,
                             RelOptInfo *baserel, ParamPathInfo *param_info);
 extern void cost_tablefuncscan(Path *path, PlannerInfo *root,

@@ -96,8 +96,6 @@ extern SubqueryScanPath *create_subqueryscan_path(PlannerInfo *root,
                                                   List *pathkeys, Relids required_outer);
 extern Path *create_functionscan_path(PlannerInfo *root, RelOptInfo *rel,
                                       List *pathkeys, Relids required_outer);
-extern Path *create_tablexprscan_path(PlannerInfo *root, RelOptInfo *rel,
-                                      List *pathkeys, Relids required_outer);
 extern Path *create_valuesscan_path(PlannerInfo *root, RelOptInfo *rel,
                                     Relids required_outer);
 extern Path *create_tablefuncscan_path(PlannerInfo *root, RelOptInfo *rel,
@@ -33,7 +33,7 @@
  * no way for them to share kernel file descriptors with other files.
  *
  * Likewise, use AllocateDir/FreeDir, not opendir/closedir, to allocate
- * open directories (DIR*), and OpenTransientFile/CloseTransient File for an
+ * open directories (DIR*), and OpenTransientFile/CloseTransientFile for an
  * unbuffered file descriptor.
  */
 #ifndef FD_H

@@ -63,7 +63,7 @@ struct XidCache
 (PROC_IN_VACUUM | PROC_IN_ANALYZE | PROC_VACUUM_FOR_WRAPAROUND)
 
 /*
- * We allow a small number of "weak" relation locks (AccesShareLock,
+ * We allow a small number of "weak" relation locks (AccessShareLock,
  * RowShareLock, RowExclusiveLock) to be recorded in the PGPROC structure
  * rather than the main lock table.  This eases contention on the lock
  * manager LWLocks.  See storage/lmgr/README for additional details.
@@ -9,11 +9,6 @@
  *
  * src/include/tcop/tcopprot.h
  *
- * OLD COMMENTS
- *    This file was created so that other c files could get the two
- *    function prototypes without having to include tcop.h which single
- *    handedly includes the whole f*cking tree -- mer 5 Nov. 1991
- *
  *-------------------------------------------------------------------------
  */
 #ifndef TCOPPROT_H

@@ -29,8 +29,6 @@ extern void AtEOXact_Inval(bool isCommit);
 
 extern void AtEOSubXact_Inval(bool isCommit);
 
-extern void AtPrepare_Inval(void);
-
 extern void PostPrepare_Inval(void);
 
 extern void CommandEndInvalidationMessages(void);
@@ -184,8 +184,8 @@ typedef struct SortSupportData
     /*
      * Full, authoritative comparator for key that an abbreviated
      * representation was generated for, used when an abbreviated comparison
-     * was inconclusive (by calling ApplySortComparatorFull()), or used to
-     * replace "comparator" when core system ultimately decides against
+     * was inconclusive (by calling ApplySortAbbrevFullComparator()), or used
+     * to replace "comparator" when core system ultimately decides against
      * abbreviation.
      */
     int         (*abbrev_full_comparator) (Datum x, Datum y, SortSupport ssup);
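
The fallback this comment describes looks roughly like the following (a simplified sketch with a hypothetical wrapper name; the real ApplySortAbbrevFullComparator also handles NULLs and sort direction):

    static int
    compare_with_abbrev_sketch(Datum abbrev1, Datum full1,
                               Datum abbrev2, Datum full2,
                               SortSupport ssup)
    {
        int     cmp = ssup->comparator(abbrev1, abbrev2, ssup);

        if (cmp == 0)
        {
            /* abbreviated keys tied: only the full comparator is authoritative */
            cmp = ssup->abbrev_full_comparator(full1, full2, ssup);
        }
        return cmp;
    }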
@@ -717,7 +717,7 @@ pg_password_sendauth(PGconn *conn, const char *password, AuthRequest areq)
     const char *pwd_to_send;
     char        md5Salt[4];
 
-    /* Read the salt from the AuthenticationMD5 message. */
+    /* Read the salt from the AuthenticationMD5Password message. */
     if (areq == AUTH_REQ_MD5)
     {
         if (pqGetnchar(md5Salt, 4, conn))

@@ -897,7 +897,7 @@ pg_fe_sendauth(AuthRequest areq, int payloadlen, PGconn *conn)
             /*
              * No SSPI support.  However, if we have GSSAPI but not SSPI
              * support, AUTH_REQ_SSPI will have been handled in the codepath
-             * for AUTH_REQ_GSSAPI above, so don't duplicate the case label in
+             * for AUTH_REQ_GSS above, so don't duplicate the case label in
              * that case.
              */
 #if !defined(ENABLE_GSS)