mirror of https://git.postgresql.org/git/postgresql.git (synced 2024-12-09 08:10:09 +08:00)
Fix some typos, grammar and style in docs and comments
The portions fixing the documentation are backpatched where needed.

Author: Justin Pryzby
Discussion: https://postgr.es/m/20210210235557.GQ20012@telsasoft.com
backpatch-through: 9.6
parent 8ec8fe0f31
commit bcf2667bf6
@@ -338,7 +338,7 @@ tuple_data_split_internal(Oid relid, char *tupdata,
  attr = TupleDescAttr(tupdesc, i);

  /*
- * Tuple header can specify less attributes than tuple descriptor as
+ * Tuple header can specify fewer attributes than tuple descriptor as
  * ALTER TABLE ADD COLUMN without DEFAULT keyword does not actually
  * change tuples in pages, so attributes with numbers greater than
  * (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
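For readers unfamiliar with the mechanism this comment describes, here is a hedged sketch (not part of the commit) of the check it refers to; HeapTupleHeaderGetNatts() is the real macro wrapping (t_infomask2 & HEAP_NATTS_MASK), while the helper name and variables are invented for illustration.

#include "postgres.h"
#include "access/htup_details.h"
#include "access/tupdesc.h"

/*
 * Hypothetical helper: true when attribute attnum (0-based) of an on-disk
 * tuple must be read as NULL (or as its missing value) because the tuple
 * was written before the column was added by ALTER TABLE ADD COLUMN.
 */
static bool
attr_is_beyond_stored_natts(HeapTupleHeader tuphdr, TupleDesc tupdesc, int attnum)
{
    int     stored_natts = HeapTupleHeaderGetNatts(tuphdr);

    Assert(attnum < tupdesc->natts);
    return attnum >= stored_natts;
}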
@@ -619,7 +619,7 @@ SELECT * FROM test1 ORDER BY a || b COLLATE "fr_FR";
  name such as <literal>de_DE</literal> can be considered unique
  within a given database even though it would not be unique globally.
  Use of the stripped collation names is recommended, since it will
- make one less thing you need to change if you decide to change to
+ make one fewer thing you need to change if you decide to change to
  another database encoding. Note however that the <literal>default</literal>,
  <literal>C</literal>, and <literal>POSIX</literal> collations can be used regardless of
  the database encoding.
@@ -448,7 +448,7 @@
  of <type>anycompatible</type> and <type>anycompatiblenonarray</type>
  inputs, the array element types of <type>anycompatiblearray</type>
  inputs, the range subtypes of <type>anycompatiblerange</type> inputs,
- and the multirange subtypes of <type>anycompatiablemultirange</type>
+ and the multirange subtypes of <type>anycompatiblemultirange</type>
  inputs. If <type>anycompatiblenonarray</type> is present then the
  common type is required to be a non-array type. Once a common type is
  identified, arguments in <type>anycompatible</type>
@@ -626,7 +626,7 @@ ExecForeignBatchInsert(EState *estate,
  Insert multiple tuples in bulk into the foreign table.
  The parameters are the same for <function>ExecForeignInsert</function>
  except <literal>slots</literal> and <literal>planSlots</literal> contain
- multiple tuples and <literal>*numSlots></literal> specifies the number of
+ multiple tuples and <literal>*numSlots</literal> specifies the number of
  tuples in those arrays.
  </para>
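Since this passage documents the batch-insert callback, a minimal skeleton may help; it follows the ExecForeignBatchInsert signature shown in the surrounding documentation, while the callback name and body comments are assumptions, not part of the commit.

#include "postgres.h"
#include "executor/tuptable.h"
#include "nodes/execnodes.h"

/* Hedged skeleton of an FDW batch-insert callback. */
static TupleTableSlot **
my_exec_foreign_batch_insert(EState *estate,
                             ResultRelInfo *rinfo,
                             TupleTableSlot **slots,
                             TupleTableSlot **planSlots,
                             int *numSlots)
{
    /* slots[0 .. *numSlots - 1] and planSlots[0 .. *numSlots - 1] hold the rows */
    for (int i = 0; i < *numSlots; i++)
    {
        /* append slots[i] to a single remote statement or wire message */
    }

    /* ship the accumulated batch to the remote server in one round trip */

    /* return the slots that were actually inserted */
    return slots;
}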
@@ -655,7 +655,7 @@ ExecForeignBatchInsert(EState *estate,
  <literal>NULL</literal>, attempts to insert into the foreign table will
  use <function>ExecForeignInsert</function>.
  This function is not used if the <command>INSERT</command> has the
- <literal>RETURNING></literal> clause.
+ <literal>RETURNING</literal> clause.
  </para>

  <para>
@@ -672,9 +672,8 @@ GetForeignModifyBatchSize(ResultRelInfo *rinfo);

  Report the maximum number of tuples that a single
  <function>ExecForeignBatchInsert</function> call can handle for
- the specified foreign table. That is, The executor passes at most
- the number of tuples that this function returns to
- <function>ExecForeignBatchInsert</function>.
+ the specified foreign table. The executor passes at most
+ the given number of tuples to <function>ExecForeignBatchInsert</function>.
  <literal>rinfo</literal> is the <structname>ResultRelInfo</structname> struct describing
  the target foreign table.
  The FDW is expected to provide a foreign server and/or foreign
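The size reported here caps how many tuples each ExecForeignBatchInsert call receives. A hedged sketch, with the constant invented for illustration (postgres_fdw, for comparison, derives the value from its batch_size option):

#include "postgres.h"
#include "nodes/execnodes.h"

/* Hedged sketch: report a fixed batch size for every foreign table. */
static int
my_get_foreign_modify_batch_size(ResultRelInfo *rinfo)
{
    return 100;         /* executor passes at most 100 tuples per call */
}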
@@ -791,9 +791,9 @@ typedef void (*LogicalDecodeMessageCB) (struct LogicalDecodingContext *ctx,
  <para>
  The optional <function>filter_prepare_cb</function> callback
  is called to determine whether data that is part of the current
- two-phase commit transaction should be considered for decode
- at this prepare stage or as a regular one-phase transaction at
- <command>COMMIT PREPARED</command> time later. To signal that
+ two-phase commit transaction should be considered for decoding
+ at this prepare stage or later as a regular one-phase transaction at
+ <command>COMMIT PREPARED</command> time. To signal that
  decoding should be skipped, return <literal>true</literal>;
  <literal>false</literal> otherwise. When the callback is not
  defined, <literal>false</literal> is assumed (i.e. nothing is
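A hedged sketch of such a callback, assuming the (ctx, xid, gid) parameter list of LogicalDecodeFilterPrepareCB; the gid-prefix rule is invented for illustration only.

#include "postgres.h"
#include "replication/logical.h"

static bool
my_filter_prepare_cb(struct LogicalDecodingContext *ctx,
                     TransactionId xid, const char *gid)
{
    /*
     * Returning true skips decoding at PREPARE time; the transaction is then
     * decoded as a regular one-phase transaction at COMMIT PREPARED time.
     */
    return strncmp(gid, "skip_", 5) == 0;
}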
@@ -820,12 +820,12 @@ typedef bool (*LogicalDecodeFilterPrepareCB) (struct LogicalDecodingContext *ctx
  The required <function>begin_prepare_cb</function> callback is called
  whenever the start of a prepared transaction has been decoded. The
  <parameter>gid</parameter> field, which is part of the
- <parameter>txn</parameter> parameter can be used in this callback to
- check if the plugin has already received this prepare in which case it
- can skip the remaining changes of the transaction. This can only happen
- if the user restarts the decoding after receiving the prepare for a
- transaction but before receiving the commit prepared say because of some
- error.
+ <parameter>txn</parameter> parameter, can be used in this callback to
+ check if the plugin has already received this <command>PREPARE</command>
+ in which case it can skip the remaining changes of the transaction.
+ This can only happen if the user restarts the decoding after receiving
+ the <command>PREPARE</command> for a transaction but before receiving
+ the <command>COMMIT PREPARED</command>, say because of some error.
  <programlisting>
  typedef void (*LogicalDecodeBeginPrepareCB) (struct LogicalDecodingContext *ctx,
  ReorderBufferTXN *txn);
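A hedged sketch matching the LogicalDecodeBeginPrepareCB typedef above; the textual output format is invented, while txn->gid, appendStringInfo and the OutputPlugin*Write calls are existing interfaces.

#include "postgres.h"
#include "lib/stringinfo.h"
#include "replication/logical.h"
#include "replication/reorderbuffer.h"

static void
my_begin_prepare_cb(struct LogicalDecodingContext *ctx, ReorderBufferTXN *txn)
{
    /*
     * txn->gid is the identifier given to PREPARE TRANSACTION; a plugin that
     * tracks already-received prepares could look it up here and skip the
     * transaction's remaining changes.
     */
    OutputPluginPrepareWrite(ctx, true);
    appendStringInfo(ctx->out, "BEGIN PREPARE %s", txn->gid);
    OutputPluginWrite(ctx, true);
}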
@@ -842,7 +842,7 @@ typedef bool (*LogicalDecodeFilterPrepareCB) (struct LogicalDecodingContext *ctx
  decoded. The <function>change_cb</function> callback for all modified
  rows will have been called before this, if there have been any modified
  rows. The <parameter>gid</parameter> field, which is part of the
- <parameter>txn</parameter> parameter can be used in this callback.
+ <parameter>txn</parameter> parameter, can be used in this callback.
  <programlisting>
  typedef void (*LogicalDecodePrepareCB) (struct LogicalDecodingContext *ctx,
  ReorderBufferTXN *txn,
@@ -856,9 +856,9 @@ typedef bool (*LogicalDecodeFilterPrepareCB) (struct LogicalDecodingContext *ctx

  <para>
  The required <function>commit_prepared_cb</function> callback is called
- whenever a transaction commit prepared has been decoded. The
- <parameter>gid</parameter> field, which is part of the
- <parameter>txn</parameter> parameter can be used in this callback.
+ whenever a transaction <command>COMMIT PREPARED</command> has been decoded.
+ The <parameter>gid</parameter> field, which is part of the
+ <parameter>txn</parameter> parameter, can be used in this callback.
  <programlisting>
  typedef void (*LogicalDecodeCommitPreparedCB) (struct LogicalDecodingContext *ctx,
  ReorderBufferTXN *txn,
@@ -872,15 +872,15 @@ typedef bool (*LogicalDecodeFilterPrepareCB) (struct LogicalDecodingContext *ctx

  <para>
  The required <function>rollback_prepared_cb</function> callback is called
- whenever a transaction rollback prepared has been decoded. The
- <parameter>gid</parameter> field, which is part of the
- <parameter>txn</parameter> parameter can be used in this callback. The
+ whenever a transaction <command>ROLLBACK PREPARED</command> has been
+ decoded. The <parameter>gid</parameter> field, which is part of the
+ <parameter>txn</parameter> parameter, can be used in this callback. The
  parameters <parameter>prepare_end_lsn</parameter> and
  <parameter>prepare_time</parameter> can be used to check if the plugin
- has received this prepare transaction in which case it can apply the
- rollback, otherwise, it can skip the rollback operation. The
+ has received this <command>PREPARE TRANSACTION</command> in which case
+ it can apply the rollback, otherwise, it can skip the rollback operation. The
  <parameter>gid</parameter> alone is not sufficient because the downstream
- node can have prepared transaction with same identifier.
+ node can have a prepared transaction with same identifier.
  <programlisting>
  typedef void (*LogicalDecodeRollbackPreparedCB) (struct LogicalDecodingContext *ctx,
  ReorderBufferTXN *txn,
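A hedged sketch following the LogicalDecodeRollbackPreparedCB typedef above (prepare_end_lsn and prepare_time are the extra parameters the text refers to); the "have we seen this prepare?" flag stands in for plugin-private state and is not a real API.

#include "postgres.h"
#include "lib/stringinfo.h"
#include "replication/logical.h"
#include "replication/reorderbuffer.h"

static void
my_rollback_prepared_cb(struct LogicalDecodingContext *ctx,
                        ReorderBufferTXN *txn,
                        XLogRecPtr prepare_end_lsn,
                        TimestampTz prepare_time)
{
    bool    seen_prepare = false;   /* placeholder for plugin-private state */

    /*
     * If this plugin never saw the corresponding PREPARE TRANSACTION (for
     * example because decoding was restarted in between), the rollback can
     * be skipped; txn->gid alone is not enough, since the downstream node
     * can have a prepared transaction with the same identifier.
     */
    if (!seen_prepare)
        return;

    OutputPluginPrepareWrite(ctx, true);
    appendStringInfo(ctx->out, "ROLLBACK PREPARED %s", txn->gid);
    OutputPluginWrite(ctx, true);
}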
@@ -1122,7 +1122,8 @@ OutputPluginWrite(ctx, true);
  the <function>stream_commit_cb</function> callback
  (or possibly aborted using the <function>stream_abort_cb</function> callback).
  If two-phase commits are supported, the transaction can be prepared using the
- <function>stream_prepare_cb</function> callback, commit prepared using the
+ <function>stream_prepare_cb</function> callback,
+ <command>COMMIT PREPARED</command> using the
  <function>commit_prepared_cb</function> callback or aborted using the
  <function>rollback_prepared_cb</function>.
  </para>
@@ -1214,7 +1215,7 @@ stream_commit_cb(...); <-- commit of the streamed transaction
  </para>

  <para>
- When a prepared transaction is rollbacked using the
+ When a prepared transaction is rolled back using the
  <command>ROLLBACK PREPARED</command>, then the
  <function>rollback_prepared_cb</function> callback is invoked and when the
  prepared transaction is committed using <command>COMMIT PREPARED</command>,
@@ -211,7 +211,7 @@ test=# SELECT tuple_data_split('pg_class'::regclass, t_data, t_infomask, t_infom
  </para>
  <para>
  If <parameter>do_detoast</parameter> is <literal>true</literal>,
- attribute that will be detoasted as needed. Default value is
+ attributes will be detoasted as needed. Default value is
  <literal>false</literal>.
  </para>
  </listitem>
@@ -553,7 +553,7 @@ postgres=# SELECT postgres_fdw_disconnect('loopback1');
  <para>
  This function discards all the open connections that are established by
  <filename>postgres_fdw</filename> from the local session to
- the foreign servers. If the connections are used in the current local
+ foreign servers. If the connections are used in the current local
  transaction, they are not disconnected and warning messages are reported.
  This function returns <literal>true</literal> if it disconnects
  at least one connection, otherwise <literal>false</literal>.
@@ -585,22 +585,22 @@ postgres=# SELECT postgres_fdw_disconnect_all();

  <para>
  When changing the definition of or removing a foreign server or
- a user mapping, the corresponding connections are closed.
- But note that if the connections are used in the current local transaction
- at that moment, they are kept until the end of the transaction.
- Closed connections will be established again when they are necessary
- by subsequent queries using a foreign table.
+ a user mapping, the associated connections are closed.
+ But note that if any connections are in use in the current local transaction,
+ they are kept until the end of the transaction.
+ Closed connections will be re-established when they are necessary
+ by future queries using a foreign table.
  </para>

  <para>
  Once a connection to a foreign server has been established,
- it's usually kept until the local or the corresponding remote
+ it's usually kept until the local or corresponding remote
  session exits. To disconnect a connection explicitly,
  <function>postgres_fdw_disconnect</function> and
  <function>postgres_fdw_disconnect_all</function> functions
- need to be used. For example, these are useful when closing
- the connections that are no longer necessary and then preventing them
- from consuming the foreign server connections capacity too much.
+ may be used. For example, these are useful to close
+ connections that are no longer necessary, thereby releasing
+ connections on the foreign server.
  </para>
  </sect2>

@@ -6928,8 +6928,8 @@ Delete
  </term>
  <listitem>
  <para>
- Identifies the following TupleData message as a old tuple.
- This field is present if the table in which the delete has
+ Identifies the following TupleData message as an old tuple.
+ This field is present if the table in which the delete
  happened has REPLICA IDENTITY set to FULL.
  </para>
  </listitem>
@@ -56,7 +56,7 @@ ALTER SUBSCRIPTION <replaceable class="parameter">name</replaceable> RENAME TO <
  allocated for the subscription on the remote host are released. If due to
  network breakdown or some other error, <productname>PostgreSQL</productname>
  is unable to remove the slots, an ERROR will be reported. To proceed in this
- situation, either the user need to retry the operation or disassociate the
+ situation, the user either needs to retry the operation or disassociate the
  slot from the subscription and drop the subscription as explained in
  <xref linkend="sql-dropsubscription"/>.
  </para>
@@ -867,7 +867,7 @@ CREATE TYPE <replaceable class="parameter">name</replaceable>
  Before <productname>PostgreSQL</productname> version 8.3, the name of
  a generated array type was always exactly the element type's name with one
  underscore character (<literal>_</literal>) prepended. (Type names were
- therefore restricted in length to one less character than other names.)
+ therefore restricted in length to one fewer character than other names.)
  While this is still usually the case, the array type name may vary from
  this in case of maximum-length names or collisions with user type names
  that begin with underscore. Writing code that depends on this convention
@@ -45,7 +45,7 @@ DROP INDEX [ CONCURRENTLY ] [ IF EXISTS ] <replaceable class="parameter">name</r
  <para>
  Drop the index without locking out concurrent selects, inserts, updates,
  and deletes on the index's table. A normal <command>DROP INDEX</command>
- acquires exclusive lock on the table, blocking other accesses until the
+ acquires an exclusive lock on the table, blocking other accesses until the
  index drop can be completed. With this option, the command instead
  waits until conflicting transactions have completed.
  </para>
@@ -1266,7 +1266,7 @@ CREATE [ OR REPLACE ] RULE <replaceable class="parameter">name</replaceable> AS
  <para>
  The query trees generated from rule actions are thrown into the
  rewrite system again, and maybe more rules get applied resulting
- in more or less query trees.
+ in additional or fewer query trees.
  So a rule's actions must have either a different
  command type or a different result relation than the rule itself is
  on, otherwise this recursive process will end up in an infinite loop.
@@ -237,19 +237,19 @@
  </indexterm>

  <para>
- Data pages are not checksum protected by default, but this can optionally be
- enabled for a cluster. When enabled, each data page will be assigned a
- checksum that is updated when the page is written and verified every time
- the page is read. Only data pages are protected by checksums, internal data
+ By default, data pages are not protected by checksums, but this can optionally be
+ enabled for a cluster. When enabled, each data page will be assigned a
+ checksum that is updated when the page is written and verified each time
+ the page is read. Only data pages are protected by checksums; internal data
  structures and temporary files are not.
  </para>

  <para>
- Checksums are normally enabled when the cluster is initialized using <link
+ Checksums verification is normally enabled when the cluster is initialized using <link
  linkend="app-initdb-data-checksums"><application>initdb</application></link>.
  They can also be enabled or disabled at a later time as an offline
  operation. Data checksums are enabled or disabled at the full cluster
- level, and cannot be specified individually for databases or tables.
+ level, and cannot be specified for individual databases or tables.
  </para>

  <para>
@@ -260,9 +260,9 @@
  </para>

  <para>
- When attempting to recover from corrupt data it may be necessary to bypass
- the checksum protection in order to recover data. To do this, temporarily
- set the configuration parameter <xref linkend="guc-ignore-checksum-failure" />.
+ When attempting to recover from corrupt data, it may be necessary to bypass
+ the checksum protection. To do this, temporarily set the configuration
+ parameter <xref linkend="guc-ignore-checksum-failure" />.
  </para>

  <sect2 id="checksums-offline-enable-disable">
@@ -719,11 +719,11 @@ heap_copytuple_with_tuple(HeapTuple src, HeapTuple dest)
  }

  /*
- * Expand a tuple which has less attributes than required. For each attribute
+ * Expand a tuple which has fewer attributes than required. For each attribute
  * not present in the sourceTuple, if there is a missing value that will be
  * used. Otherwise the attribute will be set to NULL.
  *
- * The source tuple must have less attributes than the required number.
+ * The source tuple must have fewer attributes than the required number.
  *
  * Only one of targetHeapTuple and targetMinimalTuple may be supplied. The
  * other argument must be NULL.
@@ -433,8 +433,7 @@ XLogReadBufferForRedoExtended(XLogReaderState *record,
  * NB: A redo function should normally not call this directly. To get a page
  * to modify, use XLogReadBufferForRedoExtended instead. It is important that
  * all pages modified by a WAL record are registered in the WAL records, or
- * they will be invisible to tools that that need to know which pages are
- * modified.
+ * they will be invisible to tools that need to know which pages are modified.
  */
  Buffer
  XLogReadBufferExtended(RelFileNode rnode, ForkNumber forknum,
@@ -186,7 +186,7 @@ get_am_oid(const char *amname, bool missing_ok)
  }

  /*
- * get_am_name - given an access method OID name and type, look up its name.
+ * get_am_name - given an access method OID, look up its name.
  */
  char *
  get_am_name(Oid amOid)
@@ -2070,8 +2070,7 @@ initialize_hash_entry(AggState *aggstate, TupleHashTable hashtable,
  }

  /*
- * Look up hash entries for the current tuple in all hashed grouping sets,
- * returning an array of pergroup pointers suitable for advance_aggregates.
+ * Look up hash entries for the current tuple in all hashed grouping sets.
  *
  * Be aware that lookup_hash_entry can reset the tmpcontext.
  *
@@ -71,7 +71,7 @@ static Datum GetAggInitVal(Datum textInitVal, Oid transtype);
  *
  * Information about the aggregates and transition functions are collected
  * in the root->agginfos and root->aggtransinfos lists. The 'aggtranstype',
- * 'aggno', and 'aggtransno' fields in are filled in in each Aggref.
+ * 'aggno', and 'aggtransno' fields of each Aggref are filled in.
  *
  * NOTE: This modifies the Aggrefs in the input expression in-place!
  *
@@ -2075,7 +2075,7 @@ GetSnapshotDataReuse(Snapshot snapshot)
  * holding ProcArrayLock) exclusively). Thus the xactCompletionCount check
  * ensures we would detect if the snapshot would have changed.
  *
- * As the snapshot contents are the same as it was before, it is is safe
+ * As the snapshot contents are the same as it was before, it is safe
  * to re-enter the snapshot's xmin into the PGPROC array. None of the rows
  * visible under the snapshot could already have been removed (that'd
  * require the set of running transactions to change) and it fulfills the
@@ -263,7 +263,7 @@ static int compareDatetime(Datum val1, Oid typid1, Datum val2, Oid typid2,
  * implement @? and @@ operators, which in turn are intended to have an
  * index support. Thus, it's desirable to make it easier to achieve
  * consistency between index scan results and sequential scan results.
- * So, we throw as less errors as possible. Regarding this function,
+ * So, we throw as few errors as possible. Regarding this function,
  * such behavior also matches behavior of JSON_EXISTS() clause of
  * SQL/JSON. Regarding jsonb_path_match(), this function doesn't have
  * an analogy in SQL/JSON, so we define its behavior on our own.
@@ -645,7 +645,7 @@ scalarineqsel(PlannerInfo *root, Oid operator, bool isgt, bool iseq,

  /*
  * The calculation so far gave us a selectivity for the "<=" case.
- * We'll have one less tuple for "<" and one additional tuple for
+ * We'll have one fewer tuple for "<" and one additional tuple for
  * ">=", the latter of which we'll reverse the selectivity for
  * below, so we can simply subtract one tuple for both cases. The
  * cases that need this adjustment can be identified by iseq being
src/backend/utils/cache/catcache.c
@@ -1497,7 +1497,7 @@ GetCatCacheHashValue(CatCache *cache,
  * It doesn't make any sense to specify all of the cache's key columns
  * here: since the key is unique, there could be at most one match, so
  * you ought to use SearchCatCache() instead. Hence this function takes
- * one less Datum argument than SearchCatCache() does.
+ * one fewer Datum argument than SearchCatCache() does.
  *
  * The caller must not modify the list object or the pointed-to tuples,
  * and must call ReleaseCatCacheList() when done with the list.
@@ -539,7 +539,7 @@ process_queued_fetch_requests(libpq_source *src)
  chunkoff, rq->path, (int64) rq->offset);

  /*
- * We should not receive receive more data than we requested, or
+ * We should not receive more data than we requested, or
  * pg_read_binary_file() messed up. We could receive less,
  * though, if the file was truncated in the source after we
  * checked its size. That's OK, there should be a WAL record of
@@ -6458,7 +6458,7 @@ threadRun(void *arg)

  /*
  * If advanceConnectionState changed client to finished state,
- * that's one less client that remains.
+ * that's one fewer client that remains.
  */
  if (st->state == CSTATE_FINISHED || st->state == CSTATE_ABORTED)
  remains--;
@@ -21,7 +21,7 @@

  /*
  * Maximum length for identifiers (e.g. table names, column names,
- * function names). Names actually are limited to one less byte than this,
+ * function names). Names actually are limited to one fewer byte than this,
  * because the length must include a trailing zero byte.
  *
  * Changing this requires an initdb.
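As an aside illustrating the "one fewer byte" rule (a standalone sketch, not PostgreSQL code): with the default NAMEDATALEN of 64, at most 63 bytes of an identifier are kept so that the stored name still ends in a zero byte.

#include <stdio.h>
#include <string.h>

#define NAMEDATALEN 64      /* default value from pg_config_manual.h */

int
main(void)
{
    char        name[NAMEDATALEN];
    const char *ident = "a_very_long_identifier_that_keeps_going_and_going_far_beyond_sixty_three_bytes";

    /* keep at most NAMEDATALEN - 1 bytes and always NUL-terminate */
    strncpy(name, ident, NAMEDATALEN - 1);
    name[NAMEDATALEN - 1] = '\0';

    printf("%zu bytes kept\n", strlen(name));   /* prints "63 bytes kept" */
    return 0;
}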
@@ -7,7 +7,7 @@

  /*
  * Maximum length for identifiers (e.g. table names, column names,
- * function names). Names actually are limited to one less byte than this,
+ * function names). Names actually are limited to one fewer byte than this,
  * because the length must include a trailing zero byte.
  *
  * This should be at least as much as NAMEDATALEN of the database the
@@ -4325,7 +4325,7 @@ SELECT f1, polygon(8, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>';
  <(100,1),115> | ((-15,1),(18.6827201635,82.3172798365),(100,116),(181.317279836,82.3172798365),(215,1),(181.317279836,-80.3172798365),(100,-114),(18.6827201635,-80.3172798365))
  (6 rows)

- -- Too less points error
+ -- Error for insufficient number of points
  SELECT f1, polygon(1, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>';
  ERROR: must request at least 2 points
  -- Zero radius error
@@ -1,7 +1,7 @@
  -- only use parallelism when explicitly intending to do so
  SET max_parallel_maintenance_workers = 0;
  SET max_parallel_workers = 0;
- -- A table with with contents that, when sorted, triggers abbreviated
+ -- A table with contents that, when sorted, triggers abbreviated
  -- key aborts. One easy way to achieve that is to use uuids that all
  -- have the same prefix, as abbreviated keys for uuids just use the
  -- first sizeof(Datum) bytes.
@@ -424,7 +424,7 @@ SELECT f1, f1::polygon FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>';
  -- To polygon with less points
  SELECT f1, polygon(8, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>';

- -- Too less points error
+ -- Error for insufficient number of points
  SELECT f1, polygon(1, f1) FROM CIRCLE_TBL WHERE f1 >= '<(0,0),1>';

  -- Zero radius error
@@ -2,7 +2,7 @@
  SET max_parallel_maintenance_workers = 0;
  SET max_parallel_workers = 0;

- -- A table with with contents that, when sorted, triggers abbreviated
+ -- A table with contents that, when sorted, triggers abbreviated
  -- key aborts. One easy way to achieve that is to use uuids that all
  -- have the same prefix, as abbreviated keys for uuids just use the
  -- first sizeof(Datum) bytes.