2001-06-15 00:49:03 +08:00
|
|
|
/*
|
|
|
|
* dblink.c
|
|
|
|
*
|
|
|
|
* Functions returning results from a remote database
|
|
|
|
*
|
2002-09-02 14:13:31 +08:00
|
|
|
* Joe Conway <mail@joeconway.com>
|
2003-06-25 09:10:15 +08:00
|
|
|
* And contributors:
|
|
|
|
* Darko Prenosil <Darko.Prenosil@finteh.hr>
|
|
|
|
* Shridhar Daithankar <shridhar_daithankar@persistent.co.in>
|
2002-09-02 14:13:31 +08:00
|
|
|
*
|
2010-09-21 04:08:53 +08:00
|
|
|
* contrib/dblink/dblink.c
|
2022-01-08 08:04:57 +08:00
|
|
|
* Copyright (c) 2001-2022, PostgreSQL Global Development Group
|
2002-04-24 10:28:28 +08:00
|
|
|
* ALL RIGHTS RESERVED;
|
2001-06-15 00:49:03 +08:00
|
|
|
*
|
|
|
|
* Permission to use, copy, modify, and distribute this software and its
|
|
|
|
* documentation for any purpose, without fee, and without a written agreement
|
|
|
|
* is hereby granted, provided that the above copyright notice and this
|
|
|
|
* paragraph and the following two paragraphs appear in all copies.
|
|
|
|
*
|
|
|
|
* IN NO EVENT SHALL THE AUTHOR OR DISTRIBUTORS BE LIABLE TO ANY PARTY FOR
|
|
|
|
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING
|
|
|
|
* LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS
|
|
|
|
* DOCUMENTATION, EVEN IF THE AUTHOR OR DISTRIBUTORS HAVE BEEN ADVISED OF THE
|
|
|
|
* POSSIBILITY OF SUCH DAMAGE.
|
|
|
|
*
|
|
|
|
* THE AUTHOR AND DISTRIBUTORS SPECIFICALLY DISCLAIMS ANY WARRANTIES,
|
|
|
|
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
|
|
|
|
* AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
|
|
|
|
* ON AN "AS IS" BASIS, AND THE AUTHOR AND DISTRIBUTORS HAS NO OBLIGATIONS TO
|
|
|
|
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
|
|
|
|
*
|
|
|
|
*/
|
2002-09-02 14:13:31 +08:00
|
|
|
#include "postgres.h"
|
2003-07-25 01:52:50 +08:00
|
|
|
|
2006-03-11 09:19:22 +08:00
|
|
|
#include <limits.h>
|
|
|
|
|
2012-08-31 04:15:44 +08:00
|
|
|
#include "access/htup_details.h"
|
2019-01-22 02:18:20 +08:00
|
|
|
#include "access/relation.h"
|
2012-10-11 04:53:08 +08:00
|
|
|
#include "access/reloptions.h"
|
2019-01-22 02:18:20 +08:00
|
|
|
#include "access/table.h"
|
2002-09-02 14:13:31 +08:00
|
|
|
#include "catalog/namespace.h"
|
2016-12-23 01:19:44 +08:00
|
|
|
#include "catalog/pg_foreign_data_wrapper.h"
|
2012-10-11 04:53:08 +08:00
|
|
|
#include "catalog/pg_foreign_server.h"
|
2002-09-02 14:13:31 +08:00
|
|
|
#include "catalog/pg_type.h"
|
2012-10-11 04:53:08 +08:00
|
|
|
#include "catalog/pg_user_mapping.h"
|
2002-09-02 14:13:31 +08:00
|
|
|
#include "executor/spi.h"
|
2009-06-07 05:27:56 +08:00
|
|
|
#include "foreign/foreign.h"
|
2012-08-31 04:15:44 +08:00
|
|
|
#include "funcapi.h"
|
2012-08-29 11:43:09 +08:00
|
|
|
#include "lib/stringinfo.h"
|
2019-10-23 11:56:22 +08:00
|
|
|
#include "libpq-fe.h"
|
2009-06-10 00:35:36 +08:00
|
|
|
#include "mb/pg_wchar.h"
|
2008-03-27 05:10:39 +08:00
|
|
|
#include "miscadmin.h"
|
2010-06-03 17:38:33 +08:00
|
|
|
#include "parser/scansup.h"
|
2007-08-27 09:24:50 +08:00
|
|
|
#include "utils/acl.h"
|
2002-09-02 14:13:31 +08:00
|
|
|
#include "utils/builtins.h"
|
|
|
|
#include "utils/fmgroids.h"
|
2013-03-23 03:22:15 +08:00
|
|
|
#include "utils/guc.h"
|
2002-09-02 14:13:31 +08:00
|
|
|
#include "utils/lsyscache.h"
|
2003-06-25 09:10:15 +08:00
|
|
|
#include "utils/memutils.h"
|
2011-02-24 01:18:09 +08:00
|
|
|
#include "utils/rel.h"
|
2017-01-21 09:29:53 +08:00
|
|
|
#include "utils/varlena.h"
|
2001-06-15 00:49:03 +08:00
|
|
|
|
2006-05-31 06:12:16 +08:00
|
|
|
PG_MODULE_MAGIC;
|
|
|
|
|
2003-06-25 09:10:15 +08:00
|
|
|
/*
 * remoteConn
 *
 * Per-connection state for one remote database session: the libpq
 * connection itself plus bookkeeping for cursors opened through dblink.
 */
typedef struct remoteConn
{
	PGconn	   *conn;			/* Hold the remote connection */
	int			openCursorCount;	/* The number of open cursors */
	bool		newXactForCursor;	/* Opened a transaction for a cursor */
} remoteConn;
|
|
|
|
|
2012-04-05 06:39:08 +08:00
|
|
|
/*
 * storeInfo
 *
 * Working state passed around while materializing a remote query result
 * into a tuplestore (see storeQueryResult/storeRow declarations below).
 */
typedef struct storeInfo
{
	FunctionCallInfo fcinfo;	/* the function call we are producing rows for */
	Tuplestorestate *tuplestore;	/* destination tuplestore for result rows */
	AttInMetadata *attinmeta;	/* input metadata for building tuples */
	MemoryContext tmpcontext;	/* scratch context; presumably reset between
								 * rows by the storeRow machinery -- confirm
								 * against storeRow's definition */
	char	  **cstrs;			/* per-column C-string values for one row */
	/* temp storage for results to avoid leaks on exception */
	PGresult   *last_res;
	PGresult   *cur_res;
} storeInfo;
|
|
|
|
|
2002-05-28 05:59:12 +08:00
|
|
|
/*
|
|
|
|
* Internal declarations
|
|
|
|
*/
|
2009-06-02 11:21:56 +08:00
|
|
|
static Datum dblink_record_internal(FunctionCallInfo fcinfo, bool is_async);
|
2012-04-04 08:43:15 +08:00
|
|
|
static void prepTuplestoreResult(FunctionCallInfo fcinfo);
|
2013-03-23 03:22:15 +08:00
|
|
|
static void materializeResult(FunctionCallInfo fcinfo, PGconn *conn,
|
|
|
|
PGresult *res);
|
2012-04-05 06:39:08 +08:00
|
|
|
static void materializeQueryResult(FunctionCallInfo fcinfo,
|
|
|
|
PGconn *conn,
|
|
|
|
const char *conname,
|
|
|
|
const char *sql,
|
|
|
|
bool fail);
|
2015-01-27 04:17:33 +08:00
|
|
|
static PGresult *storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const char *sql);
|
|
|
|
static void storeRow(volatile storeInfo *sinfo, PGresult *res, bool first);
|
2003-06-25 09:10:15 +08:00
|
|
|
static remoteConn *getConnectionByName(const char *name);
|
|
|
|
static HTAB *createConnHash(void);
|
2005-10-08 20:12:29 +08:00
|
|
|
static void createNewConnection(const char *name, remoteConn *rconn);
|
2003-06-25 09:10:15 +08:00
|
|
|
static void deleteConnection(const char *name);
|
2018-04-08 04:00:39 +08:00
|
|
|
static char **get_pkey_attnames(Relation rel, int16 *indnkeyatts);
|
2005-11-18 10:38:24 +08:00
|
|
|
static char **get_text_array_contents(ArrayType *array, int *numitems);
|
2010-06-16 00:22:19 +08:00
|
|
|
static char *get_sql_insert(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals, char **tgt_pkattvals);
|
|
|
|
static char *get_sql_delete(Relation rel, int *pkattnums, int pknumatts, char **tgt_pkattvals);
|
|
|
|
static char *get_sql_update(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals, char **tgt_pkattvals);
|
2002-05-28 05:59:12 +08:00
|
|
|
static char *quote_ident_cstr(char *rawstr);
|
2010-06-16 00:22:19 +08:00
|
|
|
static int get_attnum_pk_pos(int *pkattnums, int pknumatts, int key);
|
|
|
|
static HeapTuple get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals);
|
2010-06-15 04:49:33 +08:00
|
|
|
static Relation get_rel_from_relname(text *relname_text, LOCKMODE lockmode, AclMode aclmode);
|
|
|
|
static char *generate_relation_name(Relation rel);
|
2008-09-22 21:55:14 +08:00
|
|
|
static void dblink_connstr_check(const char *connstr);
|
2008-01-04 05:27:59 +08:00
|
|
|
static void dblink_security_check(PGconn *conn, remoteConn *rconn);
|
2016-12-23 01:48:05 +08:00
|
|
|
static void dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
|
2018-03-23 05:33:10 +08:00
|
|
|
bool fail, const char *fmt,...) pg_attribute_printf(5, 6);
|
2009-06-07 05:27:56 +08:00
|
|
|
static char *get_connect_string(const char *servername);
|
|
|
|
static char *escape_param_str(const char *from);
|
2010-06-16 00:22:19 +08:00
|
|
|
static void validate_pkattnums(Relation rel,
|
|
|
|
int2vector *pkattnums_arg, int32 pknumatts_arg,
|
|
|
|
int **pkattnums, int *pknumatts);
|
2012-10-11 04:53:08 +08:00
|
|
|
static bool is_valid_dblink_option(const PQconninfoOption *options,
|
|
|
|
const char *option, Oid context);
|
2013-03-23 03:22:15 +08:00
|
|
|
static int applyRemoteGucs(PGconn *conn);
|
|
|
|
static void restoreLocalGucs(int nestlevel);
|
2002-05-28 05:59:12 +08:00
|
|
|
|
2002-04-24 10:28:28 +08:00
|
|
|
/* Global */
|
2005-10-18 10:55:49 +08:00
|
|
|
static remoteConn *pconn = NULL;
|
|
|
|
static HTAB *remoteConnHash = NULL;
|
2003-06-25 09:10:15 +08:00
|
|
|
|
|
|
|
/*
|
2005-10-08 20:18:48 +08:00
|
|
|
* Following is list that holds multiple remote connections.
|
|
|
|
* Calling convention of each dblink function changes to accept
|
|
|
|
* connection name as the first parameter. The connection list is
|
|
|
|
* much like ecpg e.g. a mapping between a name and a PGconn object.
|
|
|
|
*/
|
2003-06-25 09:10:15 +08:00
|
|
|
|
|
|
|
/*
 * remoteConnHashEnt
 *
 * Entry in the named-connection hash table: maps a user-supplied
 * connection name (the hash key, fixed-size) to its remoteConn state.
 */
typedef struct remoteConnHashEnt
{
	char		name[NAMEDATALEN];	/* connection name; hash key */
	remoteConn *rconn;			/* associated connection state */
} remoteConnHashEnt;
|
|
|
|
|
|
|
|
/* initial number of connection hashes */
|
|
|
|
#define NUMCONN 16
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2016-12-26 01:00:00 +08:00
|
|
|
static char *
|
|
|
|
xpstrdup(const char *in)
|
|
|
|
{
|
|
|
|
if (in == NULL)
|
|
|
|
return NULL;
|
|
|
|
return pstrdup(in);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * dblink_res_internalerror
 *
 * Report an internal-level error from libpq: capture the connection's
 * current error message, release the PGresult (if any) to avoid a leak,
 * and elog(ERROR) with the caller-supplied context string p2 prefixed.
 * Does not return.
 */
static void
pg_attribute_noreturn()
dblink_res_internalerror(PGconn *conn, PGresult *res, const char *p2)
{
	/* grab the message before clearing res; pchomp strips the newline */
	char	   *msg = pchomp(PQerrorMessage(conn));

	if (res)
		PQclear(res);
	elog(ERROR, "%s: %s", p2, msg);
}
|
|
|
|
|
|
|
|
/*
 * dblink_conn_not_avail
 *
 * Raise a "connection not available" error, including the connection name
 * in the message when one was supplied.  Does not return.
 */
static void
pg_attribute_noreturn()
dblink_conn_not_avail(const char *conname)
{
	if (conname)
		ereport(ERROR,
				(errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
				 errmsg("connection \"%s\" not available", conname)));
	else
		ereport(ERROR,
				(errcode(ERRCODE_CONNECTION_DOES_NOT_EXIST),
				 errmsg("connection not available")));
}
|
|
|
|
|
|
|
|
/*
 * dblink_get_conn
 *
 * Resolve conname_or_str into an open PGconn.  If it names an existing
 * named connection, return that connection (caller must not free it).
 * Otherwise treat it as a foreign-server name or a raw libpq connection
 * string and open a brand-new connection (caller is responsible for
 * closing it, as signaled via *freeconn_p).
 *
 * Outputs:
 *	*conn_p		- the connection to use
 *	*conname_p	- the connection name, or NULL if a new connection was made
 *	*freeconn_p	- true if caller must PQfinish() the connection when done
 */
static void
dblink_get_conn(char *conname_or_str,
				PGconn *volatile *conn_p, char **conname_p, volatile bool *freeconn_p)
{
	remoteConn *rconn = getConnectionByName(conname_or_str);
	PGconn	   *conn;
	char	   *conname;
	bool		freeconn;

	if (rconn)
	{
		/* existing named connection: reuse it, caller must not free */
		conn = rconn->conn;
		conname = conname_or_str;
		freeconn = false;
	}
	else
	{
		const char *connstr;

		/* try to interpret the string as a foreign server name first */
		connstr = get_connect_string(conname_or_str);
		if (connstr == NULL)
			connstr = conname_or_str;
		/* security check: e.g. require password for non-superusers */
		dblink_connstr_check(connstr);

		/*
		 * We must obey fd.c's limit on non-virtual file descriptors. Assume
		 * that a PGconn represents one long-lived FD.  (Doing this here also
		 * ensures that VFDs are closed if needed to make room.)
		 */
		if (!AcquireExternalFD())
		{
#ifndef WIN32					/* can't write #if within ereport() macro */
			ereport(ERROR,
					(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
					 errmsg("could not establish connection"),
					 errdetail("There are too many open files on the local server."),
					 errhint("Raise the server's max_files_per_process and/or \"ulimit -n\" limits.")));
#else
			ereport(ERROR,
					(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
					 errmsg("could not establish connection"),
					 errdetail("There are too many open files on the local server."),
					 errhint("Raise the server's max_files_per_process setting.")));
#endif
		}

		/* OK to make connection */
		conn = PQconnectdb(connstr);

		if (PQstatus(conn) == CONNECTION_BAD)
		{
			char	   *msg = pchomp(PQerrorMessage(conn));

			/* close the failed connection before releasing its FD slot */
			PQfinish(conn);
			ReleaseExternalFD();
			ereport(ERROR,
					(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
					 errmsg("could not establish connection"),
					 errdetail_internal("%s", msg)));
		}
		/* post-connection security check (e.g. authentication actually used) */
		dblink_security_check(conn, rconn);
		/* make sure we can converse in the local database's encoding */
		if (PQclientEncoding(conn) != GetDatabaseEncoding())
			PQsetClientEncoding(conn, GetDatabaseEncodingName());
		freeconn = true;
		conname = NULL;
	}

	*conn_p = conn;
	*conname_p = conname;
	*freeconn_p = freeconn;
}
|
|
|
|
|
|
|
|
static PGconn *
|
|
|
|
dblink_get_named_conn(const char *conname)
|
|
|
|
{
|
|
|
|
remoteConn *rconn = getConnectionByName(conname);
|
2017-05-18 04:31:56 +08:00
|
|
|
|
2016-12-26 01:00:00 +08:00
|
|
|
if (rconn)
|
|
|
|
return rconn->conn;
|
2017-03-14 03:44:50 +08:00
|
|
|
|
|
|
|
dblink_conn_not_avail(conname);
|
|
|
|
return NULL; /* keep compiler quiet */
|
2016-12-26 01:00:00 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
dblink_init(void)
|
|
|
|
{
|
|
|
|
if (!pconn)
|
|
|
|
{
|
|
|
|
pconn = (remoteConn *) MemoryContextAlloc(TopMemoryContext, sizeof(remoteConn));
|
|
|
|
pconn->conn = NULL;
|
|
|
|
pconn->openCursorCount = 0;
|
2017-08-16 12:22:32 +08:00
|
|
|
pconn->newXactForCursor = false;
|
2016-12-26 01:00:00 +08:00
|
|
|
}
|
|
|
|
}
|
2002-09-02 14:13:31 +08:00
|
|
|
|
|
|
|
/*
 * Create a persistent connection to another database
 *
 * Two call forms:
 *	dblink_connect(text connstr)			-- set the unnamed connection
 *	dblink_connect(text connname, text connstr)	-- create a named connection
 *
 * The connection-string argument may instead be the name of a foreign
 * data server; get_connect_string() resolves that case.  Returns the
 * text "OK" on success; errors are reported via ereport().
 */
PG_FUNCTION_INFO_V1(dblink_connect);
Datum
dblink_connect(PG_FUNCTION_ARGS)
{
	char	   *conname_or_str = NULL;	/* raw argument: server name or connstr */
	char	   *connstr = NULL; /* resolved libpq connection string */
	char	   *connname = NULL;	/* non-NULL only in the two-arg form */
	char	   *msg;
	PGconn	   *conn = NULL;
	remoteConn *rconn = NULL;	/* allocated only for named connections */

	dblink_init();

	if (PG_NARGS() == 2)
	{
		conname_or_str = text_to_cstring(PG_GETARG_TEXT_PP(1));
		connname = text_to_cstring(PG_GETARG_TEXT_PP(0));
	}
	else if (PG_NARGS() == 1)
		conname_or_str = text_to_cstring(PG_GETARG_TEXT_PP(0));

	if (connname)
	{
		/*
		 * Allocate the entry in TopMemoryContext so the named connection
		 * survives beyond the current query.
		 */
		rconn = (remoteConn *) MemoryContextAlloc(TopMemoryContext,
												  sizeof(remoteConn));
		rconn->conn = NULL;
		rconn->openCursorCount = 0;
		rconn->newXactForCursor = false;
	}

	/* first check for valid foreign data server */
	connstr = get_connect_string(conname_or_str);
	if (connstr == NULL)
		connstr = conname_or_str;	/* not a server name: treat as connstr */

	/* check password in connection string if not superuser */
	dblink_connstr_check(connstr);

	/*
	 * We must obey fd.c's limit on non-virtual file descriptors.  Assume that
	 * a PGconn represents one long-lived FD.  (Doing this here also ensures
	 * that VFDs are closed if needed to make room.)
	 */
	if (!AcquireExternalFD())
	{
#ifndef WIN32					/* can't write #if within ereport() macro */
		ereport(ERROR,
				(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
				 errmsg("could not establish connection"),
				 errdetail("There are too many open files on the local server."),
				 errhint("Raise the server's max_files_per_process and/or \"ulimit -n\" limits.")));
#else
		ereport(ERROR,
				(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
				 errmsg("could not establish connection"),
				 errdetail("There are too many open files on the local server."),
				 errhint("Raise the server's max_files_per_process setting.")));
#endif
	}

	/* OK to make connection */
	conn = PQconnectdb(connstr);

	if (PQstatus(conn) == CONNECTION_BAD)
	{
		/* Save the libpq message before PQfinish destroys it */
		msg = pchomp(PQerrorMessage(conn));
		PQfinish(conn);
		/* Give back the FD reservation taken above */
		ReleaseExternalFD();
		if (rconn)
			pfree(rconn);

		ereport(ERROR,
				(errcode(ERRCODE_SQLCLIENT_UNABLE_TO_ESTABLISH_SQLCONNECTION),
				 errmsg("could not establish connection"),
				 errdetail_internal("%s", msg)));
	}

	/* check password actually used if not superuser */
	dblink_security_check(conn, rconn);

	/* attempt to set client encoding to match server encoding, if needed */
	if (PQclientEncoding(conn) != GetDatabaseEncoding())
		PQsetClientEncoding(conn, GetDatabaseEncodingName());

	if (connname)
	{
		/* Register the new named connection in the hash table */
		rconn->conn = conn;
		createNewConnection(connname, rconn);
	}
	else
	{
		/*
		 * Replacing the unnamed connection: close any previous one and
		 * release its FD accounting before installing the new PGconn.
		 */
		if (pconn->conn)
		{
			PQfinish(pconn->conn);
			ReleaseExternalFD();
		}
		pconn->conn = conn;
	}

	PG_RETURN_TEXT_P(cstring_to_text("OK"));
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Clear a persistent connection to another database
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_disconnect);
|
|
|
|
Datum
|
|
|
|
dblink_disconnect(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2003-07-25 01:52:50 +08:00
|
|
|
char *conname = NULL;
|
2005-10-08 20:12:29 +08:00
|
|
|
remoteConn *rconn = NULL;
|
2003-06-25 09:10:15 +08:00
|
|
|
PGconn *conn = NULL;
|
|
|
|
|
2016-12-26 01:00:00 +08:00
|
|
|
dblink_init();
|
2005-10-18 10:55:49 +08:00
|
|
|
|
2003-06-25 09:10:15 +08:00
|
|
|
if (PG_NARGS() == 1)
|
|
|
|
{
|
2008-03-26 06:42:46 +08:00
|
|
|
conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
|
2005-10-08 20:12:29 +08:00
|
|
|
rconn = getConnectionByName(conname);
|
|
|
|
if (rconn)
|
|
|
|
conn = rconn->conn;
|
2003-06-25 09:10:15 +08:00
|
|
|
}
|
|
|
|
else
|
2005-10-18 10:55:49 +08:00
|
|
|
conn = pconn->conn;
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2003-06-25 09:10:15 +08:00
|
|
|
if (!conn)
|
2016-12-26 01:00:00 +08:00
|
|
|
dblink_conn_not_avail(conname);
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2003-06-25 09:10:15 +08:00
|
|
|
PQfinish(conn);
|
Account explicitly for long-lived FDs that are allocated outside fd.c.
The comments in fd.c have long claimed that all file allocations should
go through that module, but in reality that's not always practical.
fd.c doesn't supply APIs for invoking some FD-producing syscalls like
pipe() or epoll_create(); and the APIs it does supply for non-virtual
FDs are mostly insistent on releasing those FDs at transaction end;
and in some cases the actual open() call is in code that can't be made
to use fd.c, such as libpq.
This has led to a situation where, in a modern server, there are likely
to be seven or so long-lived FDs per backend process that are not known
to fd.c. Since NUM_RESERVED_FDS is only 10, that meant we had *very*
few spare FDs if max_files_per_process is >= the system ulimit and
fd.c had opened all the files it thought it safely could. The
contrib/postgres_fdw regression test, in particular, could easily be
made to fall over by running it under a restrictive ulimit.
To improve matters, invent functions Acquire/Reserve/ReleaseExternalFD
that allow outside callers to tell fd.c that they have or want to allocate
a FD that's not directly managed by fd.c. Add calls to track all the
fixed FDs in a standard backend session, so that we are honestly
guaranteeing that NUM_RESERVED_FDS FDs remain unused below the EMFILE
limit in a backend's idle state. The coding rules for these functions say
that there's no need to call them in code that just allocates one FD over
a fairly short interval; we can dip into NUM_RESERVED_FDS for such cases.
That means that there aren't all that many places where we need to worry.
But postgres_fdw and dblink must use this facility to account for
long-lived FDs consumed by libpq connections. There may be other places
where it's worth doing such accounting, too, but this seems like enough
to solve the immediate problem.
Internally to fd.c, "external" FDs are limited to max_safe_fds/3 FDs.
(Callers can choose to ignore this limit, but of course it's unwise
to do so except for fixed file allocations.) I also reduced the limit
on "allocated" files to max_safe_fds/3 FDs (it had been max_safe_fds/2).
Conceivably a smarter rule could be used here --- but in practice,
on reasonable systems, max_safe_fds should be large enough that this
isn't much of an issue, so KISS for now. To avoid possible regression
in the number of external or allocated files that can be opened,
increase FD_MINFREE and the lower limit on max_files_per_process a
little bit; we now insist that the effective "ulimit -n" be at least 64.
This seems like pretty clearly a bug fix, but in view of the lack of
field complaints, I'll refrain from risking a back-patch.
Discussion: https://postgr.es/m/E1izCmM-0005pV-Co@gemulon.postgresql.org
2020-02-25 06:28:33 +08:00
|
|
|
ReleaseExternalFD();
|
2005-10-08 20:12:29 +08:00
|
|
|
if (rconn)
|
2003-06-25 09:10:15 +08:00
|
|
|
{
|
2003-07-25 01:52:50 +08:00
|
|
|
deleteConnection(conname);
|
2005-10-08 20:12:29 +08:00
|
|
|
pfree(rconn);
|
2003-06-25 09:10:15 +08:00
|
|
|
}
|
2003-11-28 13:03:02 +08:00
|
|
|
else
|
2005-10-18 10:55:49 +08:00
|
|
|
pconn->conn = NULL;
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2008-03-26 06:42:46 +08:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text("OK"));
|
2002-09-02 14:13:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * opens a cursor using a persistent connection
 *
 * SQL-callable as:
 *	 dblink_open(cursorname text, sql text [, fail_on_error bool])
 *	 dblink_open(connname text, cursorname text, sql text [, fail_on_error bool])
 *
 * Returns the text 'OK' on success, or 'ERROR' when the remote DECLARE
 * fails and fail_on_error is false (otherwise dblink_res_error raises).
 *
 * If the remote session is not already inside a transaction, one is
 * started here and tracked via rconn->newXactForCursor; dblink_close
 * commits it again once the matching cursor count drops to zero.
 */
PG_FUNCTION_INFO_V1(dblink_open);
Datum
dblink_open(PG_FUNCTION_ARGS)
{
	PGresult   *res = NULL;
	PGconn	   *conn;
	char	   *curname = NULL;
	char	   *sql = NULL;
	char	   *conname = NULL;		/* stays NULL for the unnamed connection */
	StringInfoData buf;
	remoteConn *rconn = NULL;
	bool		fail = true;	/* default to backward compatible behavior */

	dblink_init();
	initStringInfo(&buf);

	/*
	 * Decode the overloaded SQL signatures by argument count; the
	 * three-argument form is disambiguated by the type of argument 2.
	 */
	if (PG_NARGS() == 2)
	{
		/* text,text */
		curname = text_to_cstring(PG_GETARG_TEXT_PP(0));
		sql = text_to_cstring(PG_GETARG_TEXT_PP(1));
		rconn = pconn;			/* unnamed persistent connection */
	}
	else if (PG_NARGS() == 3)
	{
		/* might be text,text,text or text,text,bool */
		if (get_fn_expr_argtype(fcinfo->flinfo, 2) == BOOLOID)
		{
			curname = text_to_cstring(PG_GETARG_TEXT_PP(0));
			sql = text_to_cstring(PG_GETARG_TEXT_PP(1));
			fail = PG_GETARG_BOOL(2);
			rconn = pconn;
		}
		else
		{
			conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
			curname = text_to_cstring(PG_GETARG_TEXT_PP(1));
			sql = text_to_cstring(PG_GETARG_TEXT_PP(2));
			rconn = getConnectionByName(conname);
		}
	}
	else if (PG_NARGS() == 4)
	{
		/* text,text,text,bool */
		conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
		curname = text_to_cstring(PG_GETARG_TEXT_PP(1));
		sql = text_to_cstring(PG_GETARG_TEXT_PP(2));
		fail = PG_GETARG_BOOL(3);
		rconn = getConnectionByName(conname);
	}

	/* error out (does not return) if no usable connection was found */
	if (!rconn || !rconn->conn)
		dblink_conn_not_avail(conname);

	conn = rconn->conn;

	/* If we are not in a transaction, start one */
	if (PQtransactionStatus(conn) == PQTRANS_IDLE)
	{
		res = PQexec(conn, "BEGIN");
		if (PQresultStatus(res) != PGRES_COMMAND_OK)
			dblink_res_internalerror(conn, res, "begin error");
		PQclear(res);
		rconn->newXactForCursor = true;

		/*
		 * Since transaction state was IDLE, we force cursor count to
		 * initially be 0. This is needed as a previous ABORT might have wiped
		 * out our transaction without maintaining the cursor count for us.
		 */
		rconn->openCursorCount = 0;
	}

	/* if we started a transaction, increment cursor count */
	if (rconn->newXactForCursor)
		(rconn->openCursorCount)++;

	/*
	 * NOTE: curname and sql are interpolated verbatim — dblink trusts the
	 * caller to supply a valid cursor name and query text.
	 */
	appendStringInfo(&buf, "DECLARE %s CURSOR FOR %s", curname, sql);
	res = PQexec(conn, buf.data);
	if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
	{
		dblink_res_error(conn, conname, res, fail,
						 "while opening cursor \"%s\"", curname);
		PG_RETURN_TEXT_P(cstring_to_text("ERROR"));
	}

	PQclear(res);
	PG_RETURN_TEXT_P(cstring_to_text("OK"));
}
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
/*
|
|
|
|
* closes a cursor
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_close);
|
|
|
|
Datum
|
|
|
|
dblink_close(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2017-04-05 21:03:11 +08:00
|
|
|
PGconn *conn;
|
2002-09-02 14:13:31 +08:00
|
|
|
PGresult *res = NULL;
|
2003-06-25 09:10:15 +08:00
|
|
|
char *curname = NULL;
|
|
|
|
char *conname = NULL;
|
2006-03-01 14:51:01 +08:00
|
|
|
StringInfoData buf;
|
2005-10-08 20:12:29 +08:00
|
|
|
remoteConn *rconn = NULL;
|
2004-03-07 10:27:00 +08:00
|
|
|
bool fail = true; /* default to backward compatible behavior */
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2016-12-26 01:00:00 +08:00
|
|
|
dblink_init();
|
2006-03-01 14:51:01 +08:00
|
|
|
initStringInfo(&buf);
|
2005-10-18 10:55:49 +08:00
|
|
|
|
2003-06-25 09:10:15 +08:00
|
|
|
if (PG_NARGS() == 1)
|
|
|
|
{
|
2004-03-07 10:27:00 +08:00
|
|
|
/* text */
|
2008-03-26 06:42:46 +08:00
|
|
|
curname = text_to_cstring(PG_GETARG_TEXT_PP(0));
|
2005-10-18 10:55:49 +08:00
|
|
|
rconn = pconn;
|
2003-06-25 09:10:15 +08:00
|
|
|
}
|
|
|
|
else if (PG_NARGS() == 2)
|
|
|
|
{
|
2004-03-07 10:27:00 +08:00
|
|
|
/* might be text,text or text,bool */
|
|
|
|
if (get_fn_expr_argtype(fcinfo->flinfo, 1) == BOOLOID)
|
|
|
|
{
|
2008-03-26 06:42:46 +08:00
|
|
|
curname = text_to_cstring(PG_GETARG_TEXT_PP(0));
|
2004-03-07 10:27:00 +08:00
|
|
|
fail = PG_GETARG_BOOL(1);
|
2005-10-18 10:55:49 +08:00
|
|
|
rconn = pconn;
|
2004-03-07 10:27:00 +08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2008-03-26 06:42:46 +08:00
|
|
|
conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
|
|
|
|
curname = text_to_cstring(PG_GETARG_TEXT_PP(1));
|
2005-10-08 20:12:29 +08:00
|
|
|
rconn = getConnectionByName(conname);
|
2004-03-07 10:27:00 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (PG_NARGS() == 3)
|
|
|
|
{
|
|
|
|
/* text,text,bool */
|
2008-03-26 06:42:46 +08:00
|
|
|
conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
|
|
|
|
curname = text_to_cstring(PG_GETARG_TEXT_PP(1));
|
2004-03-07 10:27:00 +08:00
|
|
|
fail = PG_GETARG_BOOL(2);
|
2005-10-08 20:12:29 +08:00
|
|
|
rconn = getConnectionByName(conname);
|
2003-06-25 09:10:15 +08:00
|
|
|
}
|
|
|
|
|
2005-10-18 10:55:49 +08:00
|
|
|
if (!rconn || !rconn->conn)
|
2016-12-26 01:00:00 +08:00
|
|
|
dblink_conn_not_avail(conname);
|
2017-04-05 21:03:11 +08:00
|
|
|
|
|
|
|
conn = rconn->conn;
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2006-03-01 14:51:01 +08:00
|
|
|
appendStringInfo(&buf, "CLOSE %s", curname);
|
2002-09-02 14:13:31 +08:00
|
|
|
|
|
|
|
/* close the cursor */
|
2006-03-01 14:51:01 +08:00
|
|
|
res = PQexec(conn, buf.data);
|
2002-09-02 14:13:31 +08:00
|
|
|
if (!res || PQresultStatus(res) != PGRES_COMMAND_OK)
|
2004-03-07 10:27:00 +08:00
|
|
|
{
|
2018-03-23 05:33:10 +08:00
|
|
|
dblink_res_error(conn, conname, res, fail,
|
|
|
|
"while closing cursor \"%s\"", curname);
|
2008-07-03 11:56:57 +08:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text("ERROR"));
|
2004-03-07 10:27:00 +08:00
|
|
|
}
|
2002-09-02 14:13:31 +08:00
|
|
|
|
|
|
|
PQclear(res);
|
|
|
|
|
2005-10-18 10:55:49 +08:00
|
|
|
/* if we started a transaction, decrement cursor count */
|
|
|
|
if (rconn->newXactForCursor)
|
|
|
|
{
|
|
|
|
(rconn->openCursorCount)--;
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2005-10-18 10:55:49 +08:00
|
|
|
/* if count is zero, commit the transaction */
|
|
|
|
if (rconn->openCursorCount == 0)
|
|
|
|
{
|
2017-08-16 12:22:32 +08:00
|
|
|
rconn->newXactForCursor = false;
|
2005-10-18 10:55:49 +08:00
|
|
|
|
|
|
|
res = PQexec(conn, "COMMIT");
|
|
|
|
if (PQresultStatus(res) != PGRES_COMMAND_OK)
|
2016-12-26 01:00:00 +08:00
|
|
|
dblink_res_internalerror(conn, res, "commit error");
|
2005-10-18 10:55:49 +08:00
|
|
|
PQclear(res);
|
|
|
|
}
|
|
|
|
}
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2008-03-26 06:42:46 +08:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text("OK"));
|
2002-09-02 14:13:31 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Fetch results from an open cursor
 *
 * SQL-callable as:
 *	 dblink_fetch(cursorname text, howmany int [, fail_on_error bool])
 *	 dblink_fetch(connname text, cursorname text, howmany int
 *				  [, fail_on_error bool])
 *
 * Returns a set of records materialized into a tuplestore; the caller's
 * FROM-clause rowtype must match the cursor's output.
 */
PG_FUNCTION_INFO_V1(dblink_fetch);
Datum
dblink_fetch(PG_FUNCTION_ARGS)
{
	PGresult   *res = NULL;
	char	   *conname = NULL;		/* stays NULL for the unnamed connection */
	remoteConn *rconn = NULL;
	PGconn	   *conn = NULL;
	StringInfoData buf;
	char	   *curname = NULL;
	int			howmany = 0;
	bool		fail = true;	/* default to backward compatible */

	/* verify the caller can accept a set and mark materialize mode */
	prepTuplestoreResult(fcinfo);

	dblink_init();

	/*
	 * Decode the overloaded SQL signatures by argument count; the
	 * three-argument form is disambiguated by the type of argument 2.
	 */
	if (PG_NARGS() == 4)
	{
		/* text,text,int,bool */
		conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
		curname = text_to_cstring(PG_GETARG_TEXT_PP(1));
		howmany = PG_GETARG_INT32(2);
		fail = PG_GETARG_BOOL(3);

		rconn = getConnectionByName(conname);
		if (rconn)
			conn = rconn->conn;
	}
	else if (PG_NARGS() == 3)
	{
		/* text,text,int or text,int,bool */
		if (get_fn_expr_argtype(fcinfo->flinfo, 2) == BOOLOID)
		{
			curname = text_to_cstring(PG_GETARG_TEXT_PP(0));
			howmany = PG_GETARG_INT32(1);
			fail = PG_GETARG_BOOL(2);
			conn = pconn->conn;
		}
		else
		{
			conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
			curname = text_to_cstring(PG_GETARG_TEXT_PP(1));
			howmany = PG_GETARG_INT32(2);

			rconn = getConnectionByName(conname);
			if (rconn)
				conn = rconn->conn;
		}
	}
	else if (PG_NARGS() == 2)
	{
		/* text,int */
		curname = text_to_cstring(PG_GETARG_TEXT_PP(0));
		howmany = PG_GETARG_INT32(1);
		conn = pconn->conn;
	}

	/* error out (does not return) if no usable connection was found */
	if (!conn)
		dblink_conn_not_avail(conname);

	/* curname is interpolated verbatim; dblink trusts the caller here */
	initStringInfo(&buf);
	appendStringInfo(&buf, "FETCH %d FROM %s", howmany, curname);

	/*
	 * Try to execute the query.  Note that since libpq uses malloc, the
	 * PGresult will be long-lived even though we are still in a short-lived
	 * memory context.
	 */
	res = PQexec(conn, buf.data);
	if (!res ||
		(PQresultStatus(res) != PGRES_COMMAND_OK &&
		 PQresultStatus(res) != PGRES_TUPLES_OK))
	{
		dblink_res_error(conn, conname, res, fail,
						 "while fetching from cursor \"%s\"", curname);
		/* if fail was false, report an empty result set instead */
		return (Datum) 0;
	}
	else if (PQresultStatus(res) == PGRES_COMMAND_OK)
	{
		/* cursor does not exist - closed already or bad name */
		PQclear(res);
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_CURSOR_NAME),
				 errmsg("cursor \"%s\" does not exist", curname)));
	}

	/* materializeResult takes ownership of res and releases it */
	materializeResult(fcinfo, conn, res);
	return (Datum) 0;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note: this is the new preferred version of dblink
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_record);
|
|
|
|
Datum
|
|
|
|
dblink_record(PG_FUNCTION_ARGS)
|
2006-09-03 05:11:15 +08:00
|
|
|
{
|
2009-06-02 11:21:56 +08:00
|
|
|
return dblink_record_internal(fcinfo, false);
|
2006-09-03 05:11:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_send_query);
|
|
|
|
Datum
|
|
|
|
dblink_send_query(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2016-12-26 01:00:00 +08:00
|
|
|
PGconn *conn;
|
|
|
|
char *sql;
|
2009-06-02 11:21:56 +08:00
|
|
|
int retval;
|
|
|
|
|
|
|
|
if (PG_NARGS() == 2)
|
|
|
|
{
|
2016-12-26 01:00:00 +08:00
|
|
|
conn = dblink_get_named_conn(text_to_cstring(PG_GETARG_TEXT_PP(0)));
|
2009-06-02 11:21:56 +08:00
|
|
|
sql = text_to_cstring(PG_GETARG_TEXT_PP(1));
|
|
|
|
}
|
|
|
|
else
|
|
|
|
/* shouldn't happen */
|
|
|
|
elog(ERROR, "wrong number of arguments");
|
|
|
|
|
|
|
|
/* async query send */
|
|
|
|
retval = PQsendQuery(conn, sql);
|
|
|
|
if (retval != 1)
|
2017-02-27 21:30:06 +08:00
|
|
|
elog(NOTICE, "could not send query: %s", pchomp(PQerrorMessage(conn)));
|
2009-06-02 11:21:56 +08:00
|
|
|
|
|
|
|
PG_RETURN_INT32(retval);
|
2006-09-03 05:11:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_get_result);
|
|
|
|
Datum
|
|
|
|
dblink_get_result(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2009-06-02 11:21:56 +08:00
|
|
|
return dblink_record_internal(fcinfo, true);
|
2006-09-03 05:11:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * dblink_record_internal
 *
 * Common guts of dblink() (is_async = false) and dblink_get_result()
 * (is_async = true).  Parses the overloaded SQL argument lists, locates
 * or opens the appropriate connection, and materializes the query's
 * rows into the caller's tuplestore.  Always returns (Datum) 0; the
 * actual result set travels through the ReturnSetInfo.
 */
static Datum
dblink_record_internal(FunctionCallInfo fcinfo, bool is_async)
{
	/*
	 * Both locals are read in the PG_FINALLY block after a possible
	 * longjmp out of PG_TRY, hence the volatile qualifiers.
	 */
	PGconn	   *volatile conn = NULL;
	volatile bool freeconn = false;

	/* verify the caller can accept a set and mark materialize mode */
	prepTuplestoreResult(fcinfo);

	dblink_init();

	PG_TRY();
	{
		char	   *sql = NULL;
		char	   *conname = NULL;
		bool		fail = true;	/* default to backward compatible */

		if (!is_async)
		{
			/* dblink(): decode signatures by argument count */
			if (PG_NARGS() == 3)
			{
				/* text,text,bool */
				conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
				sql = text_to_cstring(PG_GETARG_TEXT_PP(1));
				fail = PG_GETARG_BOOL(2);
				/* may open a transient connection; freeconn tracks that */
				dblink_get_conn(conname, &conn, &conname, &freeconn);
			}
			else if (PG_NARGS() == 2)
			{
				/* text,text or text,bool */
				if (get_fn_expr_argtype(fcinfo->flinfo, 1) == BOOLOID)
				{
					sql = text_to_cstring(PG_GETARG_TEXT_PP(0));
					fail = PG_GETARG_BOOL(1);
					conn = pconn->conn;
				}
				else
				{
					conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
					sql = text_to_cstring(PG_GETARG_TEXT_PP(1));
					dblink_get_conn(conname, &conn, &conname, &freeconn);
				}
			}
			else if (PG_NARGS() == 1)
			{
				/* text */
				conn = pconn->conn;
				sql = text_to_cstring(PG_GETARG_TEXT_PP(0));
			}
			else
				/* shouldn't happen */
				elog(ERROR, "wrong number of arguments");
		}
		else					/* is_async */
		{
			/* get async result */
			conname = text_to_cstring(PG_GETARG_TEXT_PP(0));

			if (PG_NARGS() == 2)
			{
				/* text,bool */
				fail = PG_GETARG_BOOL(1);
				conn = dblink_get_named_conn(conname);
			}
			else if (PG_NARGS() == 1)
			{
				/* text */
				conn = dblink_get_named_conn(conname);
			}
			else
				/* shouldn't happen */
				elog(ERROR, "wrong number of arguments");
		}

		/* error out (does not return) if no usable connection was found */
		if (!conn)
			dblink_conn_not_avail(conname);

		if (!is_async)
		{
			/* synchronous query, use efficient tuple collection method */
			materializeQueryResult(fcinfo, conn, conname, sql, fail);
		}
		else
		{
			/* async result retrieval, do it the old way */
			PGresult   *res = PQgetResult(conn);

			/* NULL means we're all done with the async results */
			if (res)
			{
				if (PQresultStatus(res) != PGRES_COMMAND_OK &&
					PQresultStatus(res) != PGRES_TUPLES_OK)
				{
					dblink_res_error(conn, conname, res, fail,
									 "while executing query");
					/* if fail isn't set, we'll return an empty query result */
				}
				else
				{
					/* materializeResult takes ownership of res */
					materializeResult(fcinfo, conn, res);
				}
			}
		}
	}
	PG_FINALLY();
	{
		/* if needed, close the connection to the database */
		if (freeconn)
		{
			PQfinish(conn);
			/* release the fd.c accounting slot held for the libpq socket */
			ReleaseExternalFD();
		}
	}
	PG_END_TRY();

	return (Datum) 0;
}
|
|
|
|
|
|
|
|
/*
|
2012-04-04 08:43:15 +08:00
|
|
|
* Verify function caller can handle a tuplestore result, and set up for that.
|
|
|
|
*
|
|
|
|
* Note: if the caller returns without actually creating a tuplestore, the
|
|
|
|
* executor will treat the function result as an empty set.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
prepTuplestoreResult(FunctionCallInfo fcinfo)
|
|
|
|
{
|
|
|
|
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
|
|
|
|
|
|
|
|
/* check to see if query supports us returning a tuplestore */
|
|
|
|
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("set-valued function called in context that cannot accept a set")));
|
|
|
|
if (!(rsinfo->allowedModes & SFRM_Materialize))
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("materialize mode required, but it is not allowed in this context")));
|
|
|
|
|
|
|
|
/* let the executor know we're sending back a tuplestore */
|
|
|
|
rsinfo->returnMode = SFRM_Materialize;
|
|
|
|
|
|
|
|
/* caller must fill these to return a non-empty result */
|
|
|
|
rsinfo->setResult = NULL;
|
|
|
|
rsinfo->setDesc = NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy the contents of the PGresult into a tuplestore to be returned
|
|
|
|
* as the result of the current function.
|
|
|
|
* The PGresult will be released in this function.
|
2010-01-25 06:19:38 +08:00
|
|
|
*/
|
|
|
|
static void
|
2013-03-23 03:22:15 +08:00
|
|
|
materializeResult(FunctionCallInfo fcinfo, PGconn *conn, PGresult *res)
|
2010-01-25 06:19:38 +08:00
|
|
|
{
|
|
|
|
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
|
|
|
|
|
2012-04-04 08:43:15 +08:00
|
|
|
/* prepTuplestoreResult must have been called previously */
|
2010-01-25 06:19:38 +08:00
|
|
|
Assert(rsinfo->returnMode == SFRM_Materialize);
|
|
|
|
|
|
|
|
PG_TRY();
|
|
|
|
{
|
|
|
|
TupleDesc tupdesc;
|
2013-03-23 03:22:15 +08:00
|
|
|
bool is_sql_cmd;
|
2010-01-25 06:19:38 +08:00
|
|
|
int ntuples;
|
|
|
|
int nfields;
|
2006-10-04 08:30:14 +08:00
|
|
|
|
2006-09-03 05:11:15 +08:00
|
|
|
if (PQresultStatus(res) == PGRES_COMMAND_OK)
|
|
|
|
{
|
|
|
|
is_sql_cmd = true;
|
2006-10-04 08:30:14 +08:00
|
|
|
|
2010-01-25 06:19:38 +08:00
|
|
|
/*
|
|
|
|
* need a tuple descriptor representing one TEXT column to return
|
|
|
|
* the command status string as our result tuple
|
|
|
|
*/
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring an pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 07:36:57 +08:00
|
|
|
tupdesc = CreateTemplateTupleDesc(1);
|
2006-09-03 05:11:15 +08:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "status",
|
|
|
|
TEXTOID, -1, 0);
|
2010-01-25 06:19:38 +08:00
|
|
|
ntuples = 1;
|
|
|
|
nfields = 1;
|
2009-06-11 22:49:15 +08:00
|
|
|
}
|
2006-09-03 05:11:15 +08:00
|
|
|
else
|
2010-01-25 06:19:38 +08:00
|
|
|
{
|
|
|
|
Assert(PQresultStatus(res) == PGRES_TUPLES_OK);
|
2009-06-11 22:49:15 +08:00
|
|
|
|
2010-01-25 06:19:38 +08:00
|
|
|
is_sql_cmd = false;
|
2009-06-11 22:49:15 +08:00
|
|
|
|
2006-09-03 05:11:15 +08:00
|
|
|
/* get a tuple descriptor for our result type */
|
|
|
|
switch (get_call_result_type(fcinfo, NULL, &tupdesc))
|
|
|
|
{
|
2008-12-01 07:23:52 +08:00
|
|
|
case TYPEFUNC_COMPOSITE:
|
2006-09-03 05:11:15 +08:00
|
|
|
/* success */
|
2008-12-01 07:23:52 +08:00
|
|
|
break;
|
2006-09-03 05:11:15 +08:00
|
|
|
case TYPEFUNC_RECORD:
|
2008-12-01 07:23:52 +08:00
|
|
|
/* failed to determine actual type of RECORD */
|
2006-09-03 05:11:15 +08:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
|
|
|
errmsg("function returning record called in context "
|
|
|
|
"that cannot accept type record")));
|
2009-06-11 22:49:15 +08:00
|
|
|
break;
|
|
|
|
default:
|
2005-05-31 07:09:07 +08:00
|
|
|
/* result type isn't composite */
|
2006-09-03 05:11:15 +08:00
|
|
|
elog(ERROR, "return type must be a row type");
|
2009-06-11 22:49:15 +08:00
|
|
|
break;
|
2006-09-03 05:11:15 +08:00
|
|
|
}
|
2006-10-04 08:30:14 +08:00
|
|
|
|
2006-09-03 05:11:15 +08:00
|
|
|
/* make sure we have a persistent copy of the tupdesc */
|
|
|
|
tupdesc = CreateTupleDescCopy(tupdesc);
|
2010-01-25 06:19:38 +08:00
|
|
|
ntuples = PQntuples(res);
|
|
|
|
nfields = PQnfields(res);
|
2009-06-11 22:49:15 +08:00
|
|
|
}
|
2006-10-04 08:30:14 +08:00
|
|
|
|
2009-06-11 22:49:15 +08:00
|
|
|
/*
|
2006-01-04 07:45:52 +08:00
|
|
|
* check result and tuple descriptor have the same number of columns
|
2009-06-11 22:49:15 +08:00
|
|
|
*/
|
2010-01-25 06:19:38 +08:00
|
|
|
if (nfields != tupdesc->natts)
|
2006-09-03 05:11:15 +08:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_DATATYPE_MISMATCH),
|
|
|
|
errmsg("remote query result rowtype does not match "
|
|
|
|
"the specified FROM clause rowtype")));
|
2009-06-11 22:49:15 +08:00
|
|
|
|
2010-01-25 06:19:38 +08:00
|
|
|
if (ntuples > 0)
|
2009-06-11 22:49:15 +08:00
|
|
|
{
|
2010-01-25 06:19:38 +08:00
|
|
|
AttInMetadata *attinmeta;
|
2013-03-23 03:22:15 +08:00
|
|
|
int nestlevel = -1;
|
2010-01-25 06:19:38 +08:00
|
|
|
Tuplestorestate *tupstore;
|
|
|
|
MemoryContext oldcontext;
|
|
|
|
int row;
|
|
|
|
char **values;
|
|
|
|
|
|
|
|
attinmeta = TupleDescGetAttInMetadata(tupdesc);
|
|
|
|
|
2013-03-23 03:22:15 +08:00
|
|
|
/* Set GUCs to ensure we read GUC-sensitive data types correctly */
|
|
|
|
if (!is_sql_cmd)
|
|
|
|
nestlevel = applyRemoteGucs(conn);
|
|
|
|
|
2010-01-25 06:19:38 +08:00
|
|
|
oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
|
|
|
|
tupstore = tuplestore_begin_heap(true, false, work_mem);
|
|
|
|
rsinfo->setResult = tupstore;
|
|
|
|
rsinfo->setDesc = tupdesc;
|
2006-09-03 05:11:15 +08:00
|
|
|
MemoryContextSwitchTo(oldcontext);
|
2002-09-02 14:13:31 +08:00
|
|
|
|
|
|
|
values = (char **) palloc(nfields * sizeof(char *));
|
2010-01-25 06:19:38 +08:00
|
|
|
|
|
|
|
/* put all tuples into the tuplestore */
|
|
|
|
for (row = 0; row < ntuples; row++)
|
2002-09-02 14:13:31 +08:00
|
|
|
{
|
2010-01-25 06:19:38 +08:00
|
|
|
HeapTuple tuple;
|
|
|
|
|
|
|
|
if (!is_sql_cmd)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < nfields; i++)
|
|
|
|
{
|
|
|
|
if (PQgetisnull(res, row, i))
|
|
|
|
values[i] = NULL;
|
|
|
|
else
|
|
|
|
values[i] = PQgetvalue(res, row, i);
|
|
|
|
}
|
|
|
|
}
|
2002-09-02 14:13:31 +08:00
|
|
|
else
|
2010-01-25 06:19:38 +08:00
|
|
|
{
|
|
|
|
values[0] = PQcmdStatus(res);
|
|
|
|
}
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2010-01-25 06:19:38 +08:00
|
|
|
/* build the tuple and put it into the tuplestore. */
|
|
|
|
tuple = BuildTupleFromCStrings(attinmeta, values);
|
|
|
|
tuplestore_puttuple(tupstore, tuple);
|
|
|
|
}
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2013-03-23 03:22:15 +08:00
|
|
|
/* clean up GUC settings, if we changed any */
|
|
|
|
restoreLocalGucs(nestlevel);
|
2010-01-25 06:19:38 +08:00
|
|
|
}
|
2002-09-02 14:13:31 +08:00
|
|
|
}
|
2019-11-01 18:09:52 +08:00
|
|
|
PG_FINALLY();
|
2002-09-02 14:13:31 +08:00
|
|
|
{
|
2010-01-25 06:19:38 +08:00
|
|
|
/* be sure to release the libpq result */
|
2002-09-02 14:13:31 +08:00
|
|
|
PQclear(res);
|
|
|
|
}
|
2010-01-25 06:19:38 +08:00
|
|
|
PG_END_TRY();
|
2002-09-02 14:13:31 +08:00
|
|
|
}
|
|
|
|
|
2012-04-05 06:39:08 +08:00
|
|
|
/*
 * Execute the given SQL command and store its results into a tuplestore
 * to be returned as the result of the current function.
 *
 * This is equivalent to PQexec followed by materializeResult, but we make
 * use of libpq's single-row mode to avoid accumulating the whole result
 * inside libpq before it gets transferred to the tuplestore.
 *
 * "conn" is the open remote connection; "conname" is used only for error
 * reporting.  If "fail" is false, remote errors are reported as NOTICEs
 * and an empty result is returned instead of raising an ERROR.
 */
static void
materializeQueryResult(FunctionCallInfo fcinfo,
					   PGconn *conn,
					   const char *conname,
					   const char *sql,
					   bool fail)
{
	ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
	/* "volatile" because these are read in PG_CATCH after a longjmp */
	PGresult   *volatile res = NULL;
	volatile storeInfo sinfo = {0};

	/* prepTuplestoreResult must have been called previously */
	Assert(rsinfo->returnMode == SFRM_Materialize);

	sinfo.fcinfo = fcinfo;

	PG_TRY();
	{
		/* Create short-lived memory context for data conversions */
		sinfo.tmpcontext = AllocSetContextCreate(CurrentMemoryContext,
												 "dblink temporary context",
												 ALLOCSET_DEFAULT_SIZES);

		/* execute query, collecting any tuples into the tuplestore */
		res = storeQueryResult(&sinfo, conn, sql);

		if (!res ||
			(PQresultStatus(res) != PGRES_COMMAND_OK &&
			 PQresultStatus(res) != PGRES_TUPLES_OK))
		{
			/*
			 * dblink_res_error will clear the passed PGresult, so we need
			 * this ugly dance to avoid doing so twice during error exit
			 */
			PGresult   *res1 = res;

			res = NULL;
			dblink_res_error(conn, conname, res1, fail,
							 "while executing query");
			/* if fail isn't set, we'll return an empty query result */
		}
		else if (PQresultStatus(res) == PGRES_COMMAND_OK)
		{
			/*
			 * storeRow didn't get called, so we need to convert the command
			 * status string to a tuple manually
			 */
			TupleDesc	tupdesc;
			AttInMetadata *attinmeta;
			Tuplestorestate *tupstore;
			HeapTuple	tuple;
			char	   *values[1];
			MemoryContext oldcontext;

			/*
			 * need a tuple descriptor representing one TEXT column to return
			 * the command status string as our result tuple
			 */
			tupdesc = CreateTemplateTupleDesc(1);
			TupleDescInitEntry(tupdesc, (AttrNumber) 1, "status",
							   TEXTOID, -1, 0);
			attinmeta = TupleDescGetAttInMetadata(tupdesc);

			/* tuplestore and its descriptor must outlive this call */
			oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
			tupstore = tuplestore_begin_heap(true, false, work_mem);
			rsinfo->setResult = tupstore;
			rsinfo->setDesc = tupdesc;
			MemoryContextSwitchTo(oldcontext);

			values[0] = PQcmdStatus(res);

			/* build the tuple and put it into the tuplestore. */
			tuple = BuildTupleFromCStrings(attinmeta, values);
			tuplestore_puttuple(tupstore, tuple);

			PQclear(res);
			res = NULL;
		}
		else
		{
			Assert(PQresultStatus(res) == PGRES_TUPLES_OK);
			/* storeRow should have created a tuplestore */
			Assert(rsinfo->setResult != NULL);

			PQclear(res);
			res = NULL;
		}

		/* clean up data conversion short-lived memory context */
		if (sinfo.tmpcontext != NULL)
			MemoryContextDelete(sinfo.tmpcontext);
		sinfo.tmpcontext = NULL;

		/* PQclear(NULL) is a no-op, so no need to test these first */
		PQclear(sinfo.last_res);
		sinfo.last_res = NULL;
		PQclear(sinfo.cur_res);
		sinfo.cur_res = NULL;
	}
	PG_CATCH();
	{
		/* be sure to release any libpq result we collected */
		PQclear(res);
		PQclear(sinfo.last_res);
		PQclear(sinfo.cur_res);
		/* and clear out any pending data in libpq */
		while ((res = PQgetResult(conn)) != NULL)
			PQclear(res);
		PG_RE_THROW();
	}
	PG_END_TRY();
}
|
|
|
|
|
|
|
|
/*
 * Execute query, and send any result rows to sinfo->tuplestore.
 *
 * Uses libpq single-row mode, so each data row arrives as its own
 * PGRES_SINGLE_TUPLE result; the terminating PGRES_TUPLES_OK (or error)
 * result for each statement is saved in sinfo->last_res.  If the query
 * string contained multiple statements, only the last statement's final
 * result is returned (earlier ones are discarded, matching PQexec).
 *
 * Returns the final PGresult; ownership transfers to the caller.
 */
static PGresult *
storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const char *sql)
{
	bool		first = true;
	int			nestlevel = -1;		/* GUC save nest level; -1 = not applied */
	PGresult   *res;

	if (!PQsendQuery(conn, sql))
		elog(ERROR, "could not send query: %s", pchomp(PQerrorMessage(conn)));

	if (!PQsetSingleRowMode(conn))	/* shouldn't fail */
		elog(ERROR, "failed to set single-row mode for dblink query");

	for (;;)
	{
		CHECK_FOR_INTERRUPTS();

		sinfo->cur_res = PQgetResult(conn);
		if (!sinfo->cur_res)
			break;				/* no more results: query fully processed */

		if (PQresultStatus(sinfo->cur_res) == PGRES_SINGLE_TUPLE)
		{
			/* got one row from possibly-bigger resultset */

			/*
			 * Set GUCs to ensure we read GUC-sensitive data types correctly.
			 * We shouldn't do this until we have a row in hand, to ensure
			 * libpq has seen any earlier ParameterStatus protocol messages.
			 */
			if (first && nestlevel < 0)
				nestlevel = applyRemoteGucs(conn);

			storeRow(sinfo, sinfo->cur_res, first);

			PQclear(sinfo->cur_res);
			sinfo->cur_res = NULL;
			first = false;
		}
		else
		{
			/* if empty resultset, fill tuplestore header */
			if (first && PQresultStatus(sinfo->cur_res) == PGRES_TUPLES_OK)
				storeRow(sinfo, sinfo->cur_res, first);

			/* store completed result at last_res */
			PQclear(sinfo->last_res);
			sinfo->last_res = sinfo->cur_res;
			sinfo->cur_res = NULL;
			/* reset for the next statement in a multi-statement string */
			first = true;
		}
	}

	/* clean up GUC settings, if we changed any */
	restoreLocalGucs(nestlevel);

	/* return last_res */
	res = sinfo->last_res;
	sinfo->last_res = NULL;
	return res;
}
|
|
|
|
|
|
|
|
/*
 * Send single row to sinfo->tuplestore.
 *
 * If "first" is true, create the tuplestore using PGresult's metadata
 * (in this case the PGresult might contain either zero or one row).
 * On subsequent calls the cached sinfo->attinmeta and sinfo->cstrs are
 * reused, so per-row work is just value extraction plus tuple build.
 */
static void
storeRow(volatile storeInfo *sinfo, PGresult *res, bool first)
{
	int			nfields = PQnfields(res);
	HeapTuple	tuple;
	int			i;
	MemoryContext oldcontext;

	if (first)
	{
		/* Prepare for new result set */
		ReturnSetInfo *rsinfo = (ReturnSetInfo *) sinfo->fcinfo->resultinfo;
		TupleDesc	tupdesc;

		/*
		 * It's possible to get more than one result set if the query string
		 * contained multiple SQL commands.  In that case, we follow PQexec's
		 * traditional behavior of throwing away all but the last result.
		 */
		if (sinfo->tuplestore)
			tuplestore_end(sinfo->tuplestore);
		sinfo->tuplestore = NULL;

		/* get a tuple descriptor for our result type */
		switch (get_call_result_type(sinfo->fcinfo, NULL, &tupdesc))
		{
			case TYPEFUNC_COMPOSITE:
				/* success */
				break;
			case TYPEFUNC_RECORD:
				/* failed to determine actual type of RECORD */
				ereport(ERROR,
						(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
						 errmsg("function returning record called in context "
								"that cannot accept type record")));
				break;
			default:
				/* result type isn't composite */
				elog(ERROR, "return type must be a row type");
				break;
		}

		/* make sure we have a persistent copy of the tupdesc */
		tupdesc = CreateTupleDescCopy(tupdesc);

		/* check result and tuple descriptor have the same number of columns */
		if (nfields != tupdesc->natts)
			ereport(ERROR,
					(errcode(ERRCODE_DATATYPE_MISMATCH),
					 errmsg("remote query result rowtype does not match "
							"the specified FROM clause rowtype")));

		/* Prepare attinmeta for later data conversions */
		sinfo->attinmeta = TupleDescGetAttInMetadata(tupdesc);

		/* Create a new, empty tuplestore */
		oldcontext = MemoryContextSwitchTo(rsinfo->econtext->ecxt_per_query_memory);
		sinfo->tuplestore = tuplestore_begin_heap(true, false, work_mem);
		rsinfo->setResult = sinfo->tuplestore;
		rsinfo->setDesc = tupdesc;
		MemoryContextSwitchTo(oldcontext);

		/* Done if empty resultset */
		if (PQntuples(res) == 0)
			return;

		/*
		 * Set up sufficiently-wide string pointers array; this won't change
		 * in size so it's easy to preallocate.
		 */
		if (sinfo->cstrs)
			pfree(sinfo->cstrs);
		sinfo->cstrs = (char **) palloc(nfields * sizeof(char *));
	}

	/* Should have a single-row result if we get here */
	Assert(PQntuples(res) == 1);

	/*
	 * Do the following work in a temp context that we reset after each tuple.
	 * This cleans up not only the data we have direct access to, but any
	 * cruft the I/O functions might leak.
	 */
	oldcontext = MemoryContextSwitchTo(sinfo->tmpcontext);

	/*
	 * Fill cstrs with null-terminated strings of column values.
	 * PQgetvalue's storage stays valid until the PGresult is cleared,
	 * which happens only after the tuple is built below.
	 */
	for (i = 0; i < nfields; i++)
	{
		if (PQgetisnull(res, 0, i))
			sinfo->cstrs[i] = NULL;
		else
			sinfo->cstrs[i] = PQgetvalue(res, 0, i);
	}

	/* Convert row to a tuple, and add it to the tuplestore */
	tuple = BuildTupleFromCStrings(sinfo->attinmeta, sinfo->cstrs);

	tuplestore_puttuple(sinfo->tuplestore, tuple);

	/* Clean up */
	MemoryContextSwitchTo(oldcontext);
	MemoryContextReset(sinfo->tmpcontext);
}
|
|
|
|
|
2006-09-03 05:11:15 +08:00
|
|
|
/*
|
|
|
|
* List all open dblink connections by name.
|
|
|
|
* Returns an array of all connection names.
|
|
|
|
* Takes no params
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_get_connections);
|
|
|
|
Datum
|
|
|
|
dblink_get_connections(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
HASH_SEQ_STATUS status;
|
|
|
|
remoteConnHashEnt *hentry;
|
|
|
|
ArrayBuildState *astate = NULL;
|
|
|
|
|
|
|
|
if (remoteConnHash)
|
|
|
|
{
|
|
|
|
hash_seq_init(&status, remoteConnHash);
|
|
|
|
while ((hentry = (remoteConnHashEnt *) hash_seq_search(&status)) != NULL)
|
|
|
|
{
|
|
|
|
/* stash away current value */
|
|
|
|
astate = accumArrayResult(astate,
|
2008-03-26 06:42:46 +08:00
|
|
|
CStringGetTextDatum(hentry->name),
|
2006-09-03 05:11:15 +08:00
|
|
|
false, TEXTOID, CurrentMemoryContext);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (astate)
|
|
|
|
PG_RETURN_ARRAYTYPE_P(makeArrayResult(astate,
|
|
|
|
CurrentMemoryContext));
|
|
|
|
else
|
|
|
|
PG_RETURN_NULL();
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Checks if a given remote connection is busy
|
|
|
|
*
|
|
|
|
* Returns 1 if the connection is busy, 0 otherwise
|
|
|
|
* Params:
|
|
|
|
* text connection_name - name of the connection to check
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_is_busy);
|
|
|
|
Datum
|
|
|
|
dblink_is_busy(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2016-12-26 01:00:00 +08:00
|
|
|
PGconn *conn;
|
2006-09-03 05:11:15 +08:00
|
|
|
|
2016-12-26 01:00:00 +08:00
|
|
|
dblink_init();
|
|
|
|
conn = dblink_get_named_conn(text_to_cstring(PG_GETARG_TEXT_PP(0)));
|
2006-09-03 05:11:15 +08:00
|
|
|
|
|
|
|
PQconsumeInput(conn);
|
|
|
|
PG_RETURN_INT32(PQisBusy(conn));
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Cancels a running request on a connection
|
|
|
|
*
|
|
|
|
* Returns text:
|
|
|
|
* "OK" if the cancel request has been sent correctly,
|
|
|
|
* an error message otherwise
|
|
|
|
*
|
|
|
|
* Params:
|
|
|
|
* text connection_name - name of the connection to check
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_cancel_query);
|
|
|
|
Datum
|
|
|
|
dblink_cancel_query(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2016-12-26 01:00:00 +08:00
|
|
|
int res;
|
|
|
|
PGconn *conn;
|
2006-09-03 05:11:15 +08:00
|
|
|
PGcancel *cancel;
|
|
|
|
char errbuf[256];
|
|
|
|
|
2016-12-26 01:00:00 +08:00
|
|
|
dblink_init();
|
|
|
|
conn = dblink_get_named_conn(text_to_cstring(PG_GETARG_TEXT_PP(0)));
|
2006-09-03 05:11:15 +08:00
|
|
|
cancel = PQgetCancel(conn);
|
|
|
|
|
|
|
|
res = PQcancel(cancel, errbuf, 256);
|
|
|
|
PQfreeCancel(cancel);
|
|
|
|
|
2008-01-04 05:27:59 +08:00
|
|
|
if (res == 1)
|
2008-03-26 06:42:46 +08:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text("OK"));
|
2006-09-03 05:11:15 +08:00
|
|
|
else
|
2008-03-26 06:42:46 +08:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text(errbuf));
|
2006-09-03 05:11:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get error message from a connection
|
|
|
|
*
|
|
|
|
* Returns text:
|
|
|
|
* "OK" if no error, an error message otherwise
|
|
|
|
*
|
|
|
|
* Params:
|
|
|
|
* text connection_name - name of the connection to check
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_error_message);
|
|
|
|
Datum
|
|
|
|
dblink_error_message(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
char *msg;
|
2016-12-26 01:00:00 +08:00
|
|
|
PGconn *conn;
|
2006-09-03 05:11:15 +08:00
|
|
|
|
2016-12-26 01:00:00 +08:00
|
|
|
dblink_init();
|
|
|
|
conn = dblink_get_named_conn(text_to_cstring(PG_GETARG_TEXT_PP(0)));
|
2006-09-03 05:11:15 +08:00
|
|
|
|
|
|
|
msg = PQerrorMessage(conn);
|
2008-01-04 05:27:59 +08:00
|
|
|
if (msg == NULL || msg[0] == '\0')
|
2008-03-26 06:42:46 +08:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text("OK"));
|
2006-09-03 05:11:15 +08:00
|
|
|
else
|
2017-02-27 21:30:06 +08:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text(pchomp(msg)));
|
2006-09-03 05:11:15 +08:00
|
|
|
}
|
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
/*
 * Execute an SQL non-SELECT command
 *
 * Argument forms:
 *	(conname text, sql text, fail bool)
 *	(conname text, sql text) or (sql text, fail bool)
 *	(sql text)                -- uses the unnamed connection
 *
 * Returns the remote command status string, or "ERROR" when the command
 * failed and "fail" is false.  A result-returning statement is rejected.
 */
PG_FUNCTION_INFO_V1(dblink_exec);
Datum
dblink_exec(PG_FUNCTION_ARGS)
{
	/* "volatile" because these are read after a possible longjmp */
	text	   *volatile sql_cmd_status = NULL;
	PGconn	   *volatile conn = NULL;
	volatile bool freeconn = false;

	dblink_init();

	PG_TRY();
	{
		PGresult   *res = NULL;
		char	   *sql = NULL;
		char	   *conname = NULL;
		bool		fail = true;	/* default to backward compatible behavior */

		if (PG_NARGS() == 3)
		{
			/* must be text,text,bool */
			conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
			sql = text_to_cstring(PG_GETARG_TEXT_PP(1));
			fail = PG_GETARG_BOOL(2);
			dblink_get_conn(conname, &conn, &conname, &freeconn);
		}
		else if (PG_NARGS() == 2)
		{
			/* might be text,text or text,bool */
			if (get_fn_expr_argtype(fcinfo->flinfo, 1) == BOOLOID)
			{
				sql = text_to_cstring(PG_GETARG_TEXT_PP(0));
				fail = PG_GETARG_BOOL(1);
				conn = pconn->conn;
			}
			else
			{
				conname = text_to_cstring(PG_GETARG_TEXT_PP(0));
				sql = text_to_cstring(PG_GETARG_TEXT_PP(1));
				dblink_get_conn(conname, &conn, &conname, &freeconn);
			}
		}
		else if (PG_NARGS() == 1)
		{
			/* must be single text argument */
			conn = pconn->conn;
			sql = text_to_cstring(PG_GETARG_TEXT_PP(0));
		}
		else
			/* shouldn't happen */
			elog(ERROR, "wrong number of arguments");

		if (!conn)
			dblink_conn_not_avail(conname);

		res = PQexec(conn, sql);
		if (!res ||
			(PQresultStatus(res) != PGRES_COMMAND_OK &&
			 PQresultStatus(res) != PGRES_TUPLES_OK))
		{
			/* dblink_res_error frees res; it errors out only if fail is set */
			dblink_res_error(conn, conname, res, fail,
							 "while executing command");

			/*
			 * and save a copy of the command status string to return as our
			 * result tuple
			 */
			sql_cmd_status = cstring_to_text("ERROR");
		}
		else if (PQresultStatus(res) == PGRES_COMMAND_OK)
		{
			/*
			 * and save a copy of the command status string to return as our
			 * result tuple
			 */
			sql_cmd_status = cstring_to_text(PQcmdStatus(res));
			PQclear(res);
		}
		else
		{
			/* PGRES_TUPLES_OK: only non-SELECT commands are allowed here */
			PQclear(res);
			ereport(ERROR,
					(errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
					 errmsg("statement returning results not allowed")));
		}
	}
	PG_FINALLY();
	{
		/* if needed, close the connection to the database */
		if (freeconn)
		{
			PQfinish(conn);
			/* release the file descriptor reserved for this connection */
			ReleaseExternalFD();
		}
	}
	PG_END_TRY();

	PG_RETURN_TEXT_P(sql_cmd_status);
}
|
|
|
|
|
2001-06-19 03:09:50 +08:00
|
|
|
|
2002-04-24 10:28:28 +08:00
|
|
|
/*
|
|
|
|
* dblink_get_pkey
|
|
|
|
*
|
2002-09-02 14:13:31 +08:00
|
|
|
* Return list of primary key fields for the supplied relation,
|
2002-04-24 10:28:28 +08:00
|
|
|
* or NULL if none exists.
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_get_pkey);
|
|
|
|
Datum
|
|
|
|
dblink_get_pkey(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2018-04-08 04:00:39 +08:00
|
|
|
int16 indnkeyatts;
|
2002-09-02 14:13:31 +08:00
|
|
|
char **results;
|
|
|
|
FuncCallContext *funcctx;
|
|
|
|
int32 call_cntr;
|
|
|
|
int32 max_calls;
|
|
|
|
AttInMetadata *attinmeta;
|
|
|
|
MemoryContext oldcontext;
|
|
|
|
|
|
|
|
/* stuff done only on the first call of the function */
|
|
|
|
if (SRF_IS_FIRSTCALL())
|
|
|
|
{
|
2010-06-15 04:49:33 +08:00
|
|
|
Relation rel;
|
|
|
|
TupleDesc tupdesc;
|
2002-09-02 14:13:31 +08:00
|
|
|
|
|
|
|
/* create a function context for cross-call persistence */
|
|
|
|
funcctx = SRF_FIRSTCALL_INIT();
|
|
|
|
|
|
|
|
/*
|
|
|
|
* switch to memory context appropriate for multiple function calls
|
|
|
|
*/
|
|
|
|
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
|
|
|
|
|
2010-06-15 04:49:33 +08:00
|
|
|
/* open target relation */
|
2017-03-13 07:35:34 +08:00
|
|
|
rel = get_rel_from_relname(PG_GETARG_TEXT_PP(0), AccessShareLock, ACL_SELECT);
|
2010-06-15 04:49:33 +08:00
|
|
|
|
|
|
|
/* get the array of attnums */
|
2018-04-08 04:00:39 +08:00
|
|
|
results = get_pkey_attnames(rel, &indnkeyatts);
|
2010-06-15 04:49:33 +08:00
|
|
|
|
|
|
|
relation_close(rel, AccessShareLock);
|
2003-08-04 08:43:34 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
/*
|
|
|
|
* need a tuple descriptor representing one INT and one TEXT column
|
|
|
|
*/
|
Remove WITH OIDS support, change oid catalog column visibility.
Previously tables declared WITH OIDS, including a significant fraction
of the catalog tables, stored the oid column not as a normal column,
but as part of the tuple header.
This special column was not shown by default, which was somewhat odd,
as it's often (consider e.g. pg_class.oid) one of the more important
parts of a row. Neither pg_dump nor COPY included the contents of the
oid column by default.
The fact that the oid column was not an ordinary column necessitated a
significant amount of special case code to support oid columns. That
already was painful for the existing, but upcoming work aiming to make
table storage pluggable, would have required expanding and duplicating
that "specialness" significantly.
WITH OIDS has been deprecated since 2005 (commit ff02d0a05280e0).
Remove it.
Removing includes:
- CREATE TABLE and ALTER TABLE syntax for declaring the table to be
WITH OIDS has been removed (WITH (oids[ = true]) will error out)
- pg_dump does not support dumping tables declared WITH OIDS and will
issue a warning when dumping one (and ignore the oid column).
- restoring a pg_dump archive with pg_restore will warn when
restoring a table with oid contents (and ignore the oid column)
- COPY will refuse to load binary dump that includes oids.
- pg_upgrade will error out when encountering tables declared WITH
OIDS, they have to be altered to remove the oid column first.
- Functionality to access the oid of the last inserted row (like
plpgsql's RESULT_OID, spi's SPI_lastoid, ...) has been removed.
The syntax for declaring a table WITHOUT OIDS (or WITH (oids = false)
for CREATE TABLE) is still supported. While that requires a bit of
support code, it seems unnecessary to break applications / dumps that
do not use oids, and are explicit about not using them.
The biggest user of WITH OID columns was postgres' catalog. This
commit changes all 'magic' oid columns to be columns that are normally
declared and stored. To reduce unnecessary query breakage all the
newly added columns are still named 'oid', even if a table's column
naming scheme would indicate 'reloid' or such. This obviously
requires adapting a lot code, mostly replacing oid access via
HeapTupleGetOid() with access to the underlying Form_pg_*->oid column.
The bootstrap process now assigns oids for all oid columns in
genbki.pl that do not have an explicit value (starting at the largest
oid previously used), only oids assigned later by oids will be above
FirstBootstrapObjectId. As the oid column now is a normal column the
special bootstrap syntax for oids has been removed.
Oids are not automatically assigned during insertion anymore, all
backend code explicitly assigns oids with GetNewOidWithIndex(). For
the rare case that insertions into the catalog via SQL are called for
the new pg_nextoid() function can be used (which only works on catalog
tables).
The fact that oid columns on system tables are now normal columns
means that they will be included in the set of columns expanded
by * (i.e. SELECT * FROM pg_class will now include the table's oid,
previously it did not). It'd not technically be hard to hide oid
column by default, but that'd mean confusing behavior would either
have to be carried forward forever, or it'd cause breakage down the
line.
While it's not unlikely that further adjustments are needed, the
scope/invasiveness of the patch makes it worthwhile to get merge this
now. It's painful to maintain externally, too complicated to commit
after the code freeze, and a dependency of a number of other
patches.
Catversion bump, for obvious reasons.
Author: Andres Freund, with contributions by John Naylor
Discussion: https://postgr.es/m/20180930034810.ywp2c7awz7opzcfr@alap3.anarazel.de
2018-11-21 07:36:57 +08:00
|
|
|
tupdesc = CreateTemplateTupleDesc(2);
|
2002-09-02 14:13:31 +08:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 1, "position",
|
2004-04-02 05:28:47 +08:00
|
|
|
INT4OID, -1, 0);
|
2002-09-02 14:13:31 +08:00
|
|
|
TupleDescInitEntry(tupdesc, (AttrNumber) 2, "colname",
|
2004-04-02 05:28:47 +08:00
|
|
|
TEXTOID, -1, 0);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
2002-09-02 14:13:31 +08:00
|
|
|
* Generate attribute metadata needed later to produce tuples from raw
|
|
|
|
* C strings
|
2002-04-24 10:28:28 +08:00
|
|
|
*/
|
2002-09-02 14:13:31 +08:00
|
|
|
attinmeta = TupleDescGetAttInMetadata(tupdesc);
|
|
|
|
funcctx->attinmeta = attinmeta;
|
|
|
|
|
2018-04-08 04:00:39 +08:00
|
|
|
if ((results != NULL) && (indnkeyatts > 0))
|
2002-04-24 10:28:28 +08:00
|
|
|
{
|
2018-04-08 04:00:39 +08:00
|
|
|
funcctx->max_calls = indnkeyatts;
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
/* got results, keep track of them */
|
|
|
|
funcctx->user_fctx = results;
|
|
|
|
}
|
|
|
|
else
|
2008-12-01 07:23:52 +08:00
|
|
|
{
|
2003-06-25 09:10:15 +08:00
|
|
|
/* fast track when no results */
|
2008-12-01 07:23:52 +08:00
|
|
|
MemoryContextSwitchTo(oldcontext);
|
2002-09-02 14:13:31 +08:00
|
|
|
SRF_RETURN_DONE(funcctx);
|
2008-12-01 07:23:52 +08:00
|
|
|
}
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
MemoryContextSwitchTo(oldcontext);
|
|
|
|
}
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
/* stuff done on every call of the function */
|
|
|
|
funcctx = SRF_PERCALL_SETUP();
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
/*
|
|
|
|
* initialize per-call variables
|
|
|
|
*/
|
|
|
|
call_cntr = funcctx->call_cntr;
|
|
|
|
max_calls = funcctx->max_calls;
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
results = (char **) funcctx->user_fctx;
|
|
|
|
attinmeta = funcctx->attinmeta;
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
if (call_cntr < max_calls) /* do when there is more left to send */
|
|
|
|
{
|
|
|
|
char **values;
|
|
|
|
HeapTuple tuple;
|
|
|
|
Datum result;
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
values = (char **) palloc(2 * sizeof(char *));
|
2014-01-07 10:30:26 +08:00
|
|
|
values[0] = psprintf("%d", call_cntr + 1);
|
2002-09-02 14:13:31 +08:00
|
|
|
values[1] = results[call_cntr];
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
/* build the tuple */
|
|
|
|
tuple = BuildTupleFromCStrings(attinmeta, values);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
/* make the tuple into a datum */
|
2004-04-02 05:28:47 +08:00
|
|
|
result = HeapTupleGetDatum(tuple);
|
2002-09-02 14:13:31 +08:00
|
|
|
|
|
|
|
SRF_RETURN_NEXT(funcctx, result);
|
2002-04-24 10:28:28 +08:00
|
|
|
}
|
2002-09-02 14:13:31 +08:00
|
|
|
else
|
2002-04-24 10:28:28 +08:00
|
|
|
{
|
2003-06-25 09:10:15 +08:00
|
|
|
/* do when there is no more left */
|
|
|
|
SRF_RETURN_DONE(funcctx);
|
2002-04-24 10:28:28 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* dblink_build_sql_insert
|
|
|
|
*
|
|
|
|
* Used to generate an SQL insert statement
|
|
|
|
* based on an existing tuple in a local relation.
|
|
|
|
* This is useful for selectively replicating data
|
|
|
|
* to another server via dblink.
|
|
|
|
*
|
|
|
|
* API:
|
|
|
|
* <relname> - name of local table of interest
|
|
|
|
* <pkattnums> - an int2vector of attnums which will be used
|
|
|
|
* to identify the local tuple of interest
|
|
|
|
* <pknumatts> - number of attnums in pkattnums
|
|
|
|
* <src_pkattvals_arry> - text array of key values which will be used
|
|
|
|
* to identify the local tuple of interest
|
|
|
|
* <tgt_pkattvals_arry> - text array of key values which will be used
|
|
|
|
* to build the string for execution remotely. These are substituted
|
|
|
|
* for their counterparts in src_pkattvals_arry
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_build_sql_insert);
|
|
|
|
Datum
|
|
|
|
dblink_build_sql_insert(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2017-03-13 07:35:34 +08:00
|
|
|
text *relname_text = PG_GETARG_TEXT_PP(0);
|
2010-06-16 00:22:19 +08:00
|
|
|
int2vector *pkattnums_arg = (int2vector *) PG_GETARG_POINTER(1);
|
|
|
|
int32 pknumatts_arg = PG_GETARG_INT32(2);
|
2005-11-18 10:38:24 +08:00
|
|
|
ArrayType *src_pkattvals_arry = PG_GETARG_ARRAYTYPE_P(3);
|
|
|
|
ArrayType *tgt_pkattvals_arry = PG_GETARG_ARRAYTYPE_P(4);
|
2010-06-15 04:49:33 +08:00
|
|
|
Relation rel;
|
2010-06-16 00:22:19 +08:00
|
|
|
int *pkattnums;
|
|
|
|
int pknumatts;
|
2002-09-02 14:13:31 +08:00
|
|
|
char **src_pkattvals;
|
|
|
|
char **tgt_pkattvals;
|
2002-04-24 10:28:28 +08:00
|
|
|
int src_nitems;
|
|
|
|
int tgt_nitems;
|
2002-09-02 14:13:31 +08:00
|
|
|
char *sql;
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
2010-06-15 04:49:33 +08:00
|
|
|
* Open target relation.
|
2002-04-24 10:28:28 +08:00
|
|
|
*/
|
2010-06-15 04:49:33 +08:00
|
|
|
rel = get_rel_from_relname(relname_text, AccessShareLock, ACL_SELECT);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
2010-06-16 00:22:19 +08:00
|
|
|
* Process pkattnums argument.
|
2002-04-24 10:28:28 +08:00
|
|
|
*/
|
2010-06-16 00:22:19 +08:00
|
|
|
validate_pkattnums(rel, pkattnums_arg, pknumatts_arg,
|
|
|
|
&pkattnums, &pknumatts);
|
2010-02-04 07:01:11 +08:00
|
|
|
|
2002-04-24 10:28:28 +08:00
|
|
|
/*
|
|
|
|
* Source array is made up of key values that will be used to locate the
|
|
|
|
* tuple of interest from the local system.
|
|
|
|
*/
|
2005-11-18 10:38:24 +08:00
|
|
|
src_pkattvals = get_text_array_contents(src_pkattvals_arry, &src_nitems);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* There should be one source array key value for each key attnum
|
|
|
|
*/
|
|
|
|
if (src_nitems != pknumatts)
|
2003-07-25 01:52:50 +08:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
2019-12-24 23:37:13 +08:00
|
|
|
errmsg("source key array length must match number of key attributes")));
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Target array is made up of key values that will be used to build the
|
|
|
|
* SQL string for use on the remote system.
|
|
|
|
*/
|
2005-11-18 10:38:24 +08:00
|
|
|
tgt_pkattvals = get_text_array_contents(tgt_pkattvals_arry, &tgt_nitems);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* There should be one target array key value for each key attnum
|
|
|
|
*/
|
|
|
|
if (tgt_nitems != pknumatts)
|
2003-07-25 01:52:50 +08:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
2019-12-24 23:37:13 +08:00
|
|
|
errmsg("target key array length must match number of key attributes")));
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Prep work is finally done. Go get the SQL string.
|
|
|
|
*/
|
2010-06-15 04:49:33 +08:00
|
|
|
sql = get_sql_insert(rel, pkattnums, pknumatts, src_pkattvals, tgt_pkattvals);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now we can close the relation.
|
|
|
|
*/
|
|
|
|
relation_close(rel, AccessShareLock);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* And send it
|
|
|
|
*/
|
2008-03-26 06:42:46 +08:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text(sql));
|
2002-04-24 10:28:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* dblink_build_sql_delete
|
|
|
|
*
|
|
|
|
* Used to generate an SQL delete statement.
|
|
|
|
* This is useful for selectively replicating a
|
|
|
|
* delete to another server via dblink.
|
|
|
|
*
|
|
|
|
* API:
|
|
|
|
* <relname> - name of remote table of interest
|
|
|
|
* <pkattnums> - an int2vector of attnums which will be used
|
|
|
|
* to identify the remote tuple of interest
|
|
|
|
* <pknumatts> - number of attnums in pkattnums
|
|
|
|
* <tgt_pkattvals_arry> - text array of key values which will be used
|
|
|
|
* to build the string for execution remotely.
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_build_sql_delete);
|
|
|
|
Datum
|
|
|
|
dblink_build_sql_delete(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2017-03-13 07:35:34 +08:00
|
|
|
text *relname_text = PG_GETARG_TEXT_PP(0);
|
2010-06-16 00:22:19 +08:00
|
|
|
int2vector *pkattnums_arg = (int2vector *) PG_GETARG_POINTER(1);
|
|
|
|
int32 pknumatts_arg = PG_GETARG_INT32(2);
|
2005-11-18 10:38:24 +08:00
|
|
|
ArrayType *tgt_pkattvals_arry = PG_GETARG_ARRAYTYPE_P(3);
|
2010-06-15 04:49:33 +08:00
|
|
|
Relation rel;
|
2010-06-16 00:22:19 +08:00
|
|
|
int *pkattnums;
|
|
|
|
int pknumatts;
|
2002-04-24 10:28:28 +08:00
|
|
|
char **tgt_pkattvals;
|
|
|
|
int tgt_nitems;
|
|
|
|
char *sql;
|
|
|
|
|
|
|
|
/*
|
2010-06-15 04:49:33 +08:00
|
|
|
* Open target relation.
|
2002-04-24 10:28:28 +08:00
|
|
|
*/
|
2010-06-15 04:49:33 +08:00
|
|
|
rel = get_rel_from_relname(relname_text, AccessShareLock, ACL_SELECT);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
2010-06-16 00:22:19 +08:00
|
|
|
* Process pkattnums argument.
|
2002-04-24 10:28:28 +08:00
|
|
|
*/
|
2010-06-16 00:22:19 +08:00
|
|
|
validate_pkattnums(rel, pkattnums_arg, pknumatts_arg,
|
|
|
|
&pkattnums, &pknumatts);
|
2010-02-04 07:01:11 +08:00
|
|
|
|
2002-04-24 10:28:28 +08:00
|
|
|
/*
|
|
|
|
* Target array is made up of key values that will be used to build the
|
|
|
|
* SQL string for use on the remote system.
|
|
|
|
*/
|
2005-11-18 10:38:24 +08:00
|
|
|
tgt_pkattvals = get_text_array_contents(tgt_pkattvals_arry, &tgt_nitems);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* There should be one target array key value for each key attnum
|
|
|
|
*/
|
|
|
|
if (tgt_nitems != pknumatts)
|
2003-07-25 01:52:50 +08:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
2019-12-24 23:37:13 +08:00
|
|
|
errmsg("target key array length must match number of key attributes")));
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Prep work is finally done. Go get the SQL string.
|
|
|
|
*/
|
2010-06-15 04:49:33 +08:00
|
|
|
sql = get_sql_delete(rel, pkattnums, pknumatts, tgt_pkattvals);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now we can close the relation.
|
|
|
|
*/
|
|
|
|
relation_close(rel, AccessShareLock);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* And send it
|
|
|
|
*/
|
2008-03-26 06:42:46 +08:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text(sql));
|
2002-04-24 10:28:28 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
|
|
|
* dblink_build_sql_update
|
|
|
|
*
|
|
|
|
* Used to generate an SQL update statement
|
|
|
|
* based on an existing tuple in a local relation.
|
|
|
|
* This is useful for selectively replicating data
|
|
|
|
* to another server via dblink.
|
|
|
|
*
|
|
|
|
* API:
|
|
|
|
* <relname> - name of local table of interest
|
|
|
|
* <pkattnums> - an int2vector of attnums which will be used
|
|
|
|
* to identify the local tuple of interest
|
|
|
|
* <pknumatts> - number of attnums in pkattnums
|
|
|
|
* <src_pkattvals_arry> - text array of key values which will be used
|
|
|
|
* to identify the local tuple of interest
|
|
|
|
* <tgt_pkattvals_arry> - text array of key values which will be used
|
|
|
|
* to build the string for execution remotely. These are substituted
|
|
|
|
* for their counterparts in src_pkattvals_arry
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_build_sql_update);
|
|
|
|
Datum
|
|
|
|
dblink_build_sql_update(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2017-03-13 07:35:34 +08:00
|
|
|
text *relname_text = PG_GETARG_TEXT_PP(0);
|
2010-06-16 00:22:19 +08:00
|
|
|
int2vector *pkattnums_arg = (int2vector *) PG_GETARG_POINTER(1);
|
|
|
|
int32 pknumatts_arg = PG_GETARG_INT32(2);
|
2005-11-18 10:38:24 +08:00
|
|
|
ArrayType *src_pkattvals_arry = PG_GETARG_ARRAYTYPE_P(3);
|
|
|
|
ArrayType *tgt_pkattvals_arry = PG_GETARG_ARRAYTYPE_P(4);
|
2010-06-15 04:49:33 +08:00
|
|
|
Relation rel;
|
2010-06-16 00:22:19 +08:00
|
|
|
int *pkattnums;
|
|
|
|
int pknumatts;
|
2002-04-24 10:28:28 +08:00
|
|
|
char **src_pkattvals;
|
|
|
|
char **tgt_pkattvals;
|
|
|
|
int src_nitems;
|
|
|
|
int tgt_nitems;
|
|
|
|
char *sql;
|
|
|
|
|
|
|
|
/*
|
2010-06-15 04:49:33 +08:00
|
|
|
* Open target relation.
|
2002-04-24 10:28:28 +08:00
|
|
|
*/
|
2010-06-15 04:49:33 +08:00
|
|
|
rel = get_rel_from_relname(relname_text, AccessShareLock, ACL_SELECT);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
2010-06-16 00:22:19 +08:00
|
|
|
* Process pkattnums argument.
|
2010-02-04 07:01:11 +08:00
|
|
|
*/
|
2010-06-16 00:22:19 +08:00
|
|
|
validate_pkattnums(rel, pkattnums_arg, pknumatts_arg,
|
|
|
|
&pkattnums, &pknumatts);
|
2010-02-04 07:01:11 +08:00
|
|
|
|
2002-04-24 10:28:28 +08:00
|
|
|
/*
|
|
|
|
* Source array is made up of key values that will be used to locate the
|
|
|
|
* tuple of interest from the local system.
|
|
|
|
*/
|
2005-11-18 10:38:24 +08:00
|
|
|
src_pkattvals = get_text_array_contents(src_pkattvals_arry, &src_nitems);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* There should be one source array key value for each key attnum
|
|
|
|
*/
|
|
|
|
if (src_nitems != pknumatts)
|
2003-07-25 01:52:50 +08:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
2019-12-24 23:37:13 +08:00
|
|
|
errmsg("source key array length must match number of key attributes")));
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Target array is made up of key values that will be used to build the
|
|
|
|
* SQL string for use on the remote system.
|
|
|
|
*/
|
2005-11-18 10:38:24 +08:00
|
|
|
tgt_pkattvals = get_text_array_contents(tgt_pkattvals_arry, &tgt_nitems);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* There should be one target array key value for each key attnum
|
|
|
|
*/
|
|
|
|
if (tgt_nitems != pknumatts)
|
2003-07-25 01:52:50 +08:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
|
2019-12-24 23:37:13 +08:00
|
|
|
errmsg("target key array length must match number of key attributes")));
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Prep work is finally done. Go get the SQL string.
|
|
|
|
*/
|
2010-06-15 04:49:33 +08:00
|
|
|
sql = get_sql_update(rel, pkattnums, pknumatts, src_pkattvals, tgt_pkattvals);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Now we can close the relation.
|
|
|
|
*/
|
|
|
|
relation_close(rel, AccessShareLock);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* And send it
|
|
|
|
*/
|
2008-03-26 06:42:46 +08:00
|
|
|
PG_RETURN_TEXT_P(cstring_to_text(sql));
|
2001-06-15 00:49:03 +08:00
|
|
|
}
|
|
|
|
|
2009-06-10 01:41:02 +08:00
|
|
|
/*
|
|
|
|
* dblink_current_query
|
|
|
|
* return the current query string
|
|
|
|
* to allow its use in (among other things)
|
|
|
|
* rewrite rules
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_current_query);
|
|
|
|
Datum
|
|
|
|
dblink_current_query(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
/* This is now just an alias for the built-in function current_query() */
|
|
|
|
PG_RETURN_DATUM(current_query(fcinfo));
|
|
|
|
}
|
|
|
|
|
2009-08-06 00:11:07 +08:00
|
|
|
/*
|
|
|
|
* Retrieve async notifications for a connection.
|
|
|
|
*
|
2012-04-24 20:15:45 +08:00
|
|
|
* Returns a setof record of notifications, or an empty set if none received.
|
|
|
|
* Can optionally take a named connection as parameter, but uses the unnamed
|
|
|
|
* connection per default.
|
2009-08-06 00:11:07 +08:00
|
|
|
*
|
|
|
|
*/
|
|
|
|
#define DBLINK_NOTIFY_COLS 3
|
|
|
|
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_get_notify);
|
|
|
|
Datum
|
|
|
|
dblink_get_notify(PG_FUNCTION_ARGS)
|
|
|
|
{
|
2016-12-26 01:00:00 +08:00
|
|
|
PGconn *conn;
|
2009-08-06 00:11:07 +08:00
|
|
|
PGnotify *notify;
|
|
|
|
ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
|
2012-04-04 08:43:15 +08:00
|
|
|
|
2016-12-26 01:00:00 +08:00
|
|
|
dblink_init();
|
2009-08-06 00:11:07 +08:00
|
|
|
if (PG_NARGS() == 1)
|
2016-12-26 01:00:00 +08:00
|
|
|
conn = dblink_get_named_conn(text_to_cstring(PG_GETARG_TEXT_PP(0)));
|
2009-08-06 00:11:07 +08:00
|
|
|
else
|
|
|
|
conn = pconn->conn;
|
|
|
|
|
2022-10-18 09:22:40 +08:00
|
|
|
InitMaterializedSRF(fcinfo, 0);
|
2009-08-06 00:11:07 +08:00
|
|
|
|
|
|
|
PQconsumeInput(conn);
|
|
|
|
while ((notify = PQnotifies(conn)) != NULL)
|
|
|
|
{
|
|
|
|
Datum values[DBLINK_NOTIFY_COLS];
|
|
|
|
bool nulls[DBLINK_NOTIFY_COLS];
|
|
|
|
|
|
|
|
memset(values, 0, sizeof(values));
|
|
|
|
memset(nulls, 0, sizeof(nulls));
|
|
|
|
|
|
|
|
if (notify->relname != NULL)
|
|
|
|
values[0] = CStringGetTextDatum(notify->relname);
|
|
|
|
else
|
|
|
|
nulls[0] = true;
|
|
|
|
|
|
|
|
values[1] = Int32GetDatum(notify->be_pid);
|
|
|
|
|
|
|
|
if (notify->extra != NULL)
|
|
|
|
values[2] = CStringGetTextDatum(notify->extra);
|
|
|
|
else
|
|
|
|
nulls[2] = true;
|
|
|
|
|
Simplify SRFs using materialize mode in contrib/ modules
9e98583 introduced a helper to centralize building their needed state
(tuplestore, tuple descriptors, etc.), checking for any errors. This
commit updates all places of contrib/ that can be switched to use
SetSingleFuncCall() as a drop-in replacement, resulting in the removal
of a lot of boilerplate code in all the modules updated by this commit.
Per analysis, some places remain as they are:
- pg_logdir_ls() in adminpack/ uses historically TYPEFUNC_RECORD as
return type, and I suspect that changing it may cause issues at run-time
with some of its past versions, down to 1.0.
- dblink/ uses a wrapper function doing exactly the work of
SetSingleFuncCall(). Here the switch should be possible, but rather
invasive so it does not seem the extra backpatch maintenance cost.
- tablefunc/, similarly, uses multiple helper functions with portions of
SetSingleFuncCall() spread across the code paths of this module.
Author: Melanie Plageman
Discussion: https://postgr.es/m/CAAKRu_bvDPJoL9mH6eYwvBpPtTGQwbDzfJbCM-OjkSZDu5yTPg@mail.gmail.com
2022-03-08 09:12:22 +08:00
|
|
|
tuplestore_putvalues(rsinfo->setResult, rsinfo->setDesc, values, nulls);
|
2009-08-06 00:11:07 +08:00
|
|
|
|
|
|
|
PQfreemem(notify);
|
|
|
|
PQconsumeInput(conn);
|
|
|
|
}
|
|
|
|
|
|
|
|
return (Datum) 0;
|
|
|
|
}
|
|
|
|
|
2012-10-11 04:53:08 +08:00
|
|
|
/*
|
|
|
|
* Validate the options given to a dblink foreign server or user mapping.
|
|
|
|
* Raise an error if any option is invalid.
|
|
|
|
*
|
|
|
|
* We just check the names of options here, so semantic errors in options,
|
|
|
|
* such as invalid numeric format, will be detected at the attempt to connect.
|
|
|
|
*/
|
|
|
|
PG_FUNCTION_INFO_V1(dblink_fdw_validator);
|
|
|
|
Datum
|
|
|
|
dblink_fdw_validator(PG_FUNCTION_ARGS)
|
|
|
|
{
|
|
|
|
List *options_list = untransformRelOptions(PG_GETARG_DATUM(0));
|
|
|
|
Oid context = PG_GETARG_OID(1);
|
|
|
|
ListCell *cell;
|
|
|
|
|
|
|
|
static const PQconninfoOption *options = NULL;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Get list of valid libpq options.
|
|
|
|
*
|
|
|
|
* To avoid unnecessary work, we get the list once and use it throughout
|
|
|
|
* the lifetime of this backend process. We don't need to care about
|
|
|
|
* memory context issues, because PQconndefaults allocates with malloc.
|
|
|
|
*/
|
|
|
|
if (!options)
|
|
|
|
{
|
|
|
|
options = PQconndefaults();
|
|
|
|
if (!options) /* assume reason for failure is OOM */
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FDW_OUT_OF_MEMORY),
|
|
|
|
errmsg("out of memory"),
|
2018-03-23 05:33:10 +08:00
|
|
|
errdetail("Could not get libpq's default connection options.")));
|
2012-10-11 04:53:08 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Validate each supplied option. */
|
|
|
|
foreach(cell, options_list)
|
|
|
|
{
|
|
|
|
DefElem *def = (DefElem *) lfirst(cell);
|
|
|
|
|
|
|
|
if (!is_valid_dblink_option(options, def->defname, context))
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Unknown option, or invalid option for the context specified, so
|
|
|
|
* complain about it. Provide a hint with list of valid options
|
|
|
|
* for the context.
|
|
|
|
*/
|
|
|
|
StringInfoData buf;
|
|
|
|
const PQconninfoOption *opt;
|
|
|
|
|
|
|
|
initStringInfo(&buf);
|
|
|
|
for (opt = options; opt->keyword; opt++)
|
|
|
|
{
|
|
|
|
if (is_valid_dblink_option(options, opt->keyword, context))
|
|
|
|
appendStringInfo(&buf, "%s%s",
|
|
|
|
(buf.len > 0) ? ", " : "",
|
|
|
|
opt->keyword);
|
|
|
|
}
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FDW_OPTION_NAME_NOT_FOUND),
|
|
|
|
errmsg("invalid option \"%s\"", def->defname),
|
2021-10-26 23:46:52 +08:00
|
|
|
buf.len > 0
|
|
|
|
? errhint("Valid options in this context are: %s",
|
|
|
|
buf.data)
|
|
|
|
: errhint("There are no valid options in this context.")));
|
2012-10-11 04:53:08 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
PG_RETURN_VOID();
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2002-04-24 10:28:28 +08:00
|
|
|
/*************************************************************
|
2001-06-15 00:49:03 +08:00
|
|
|
* internal functions
|
|
|
|
*/
|
|
|
|
|
|
|
|
|
2002-04-24 10:28:28 +08:00
|
|
|
/*
|
|
|
|
* get_pkey_attnames
|
|
|
|
*
|
|
|
|
* Get the primary key attnames for the given relation.
|
2018-04-08 04:00:39 +08:00
|
|
|
* Return NULL, and set indnkeyatts = 0, if no primary key exists.
|
2002-04-24 10:28:28 +08:00
|
|
|
*/
|
2002-05-28 05:59:12 +08:00
|
|
|
static char **
|
2018-04-08 04:00:39 +08:00
|
|
|
get_pkey_attnames(Relation rel, int16 *indnkeyatts)
|
2002-04-24 10:28:28 +08:00
|
|
|
{
|
|
|
|
Relation indexRelation;
|
2008-01-14 10:49:47 +08:00
|
|
|
ScanKeyData skey;
|
|
|
|
SysScanDesc scan;
|
2002-04-24 10:28:28 +08:00
|
|
|
HeapTuple indexTuple;
|
|
|
|
int i;
|
|
|
|
char **result = NULL;
|
|
|
|
TupleDesc tupdesc;
|
|
|
|
|
2018-04-08 04:00:39 +08:00
|
|
|
/* initialize indnkeyatts to 0 in case no primary key exists */
|
|
|
|
*indnkeyatts = 0;
|
2008-01-14 10:49:47 +08:00
|
|
|
|
2002-04-24 10:28:28 +08:00
|
|
|
tupdesc = rel->rd_att;
|
|
|
|
|
2008-01-14 10:49:47 +08:00
|
|
|
/* Prepare to scan pg_index for entries having indrelid = this rel. */
|
2019-01-22 02:32:19 +08:00
|
|
|
indexRelation = table_open(IndexRelationId, AccessShareLock);
|
2008-01-14 10:49:47 +08:00
|
|
|
ScanKeyInit(&skey,
|
2003-11-13 05:15:59 +08:00
|
|
|
Anum_pg_index_indrelid,
|
|
|
|
BTEqualStrategyNumber, F_OIDEQ,
|
2010-06-15 04:49:33 +08:00
|
|
|
ObjectIdGetDatum(RelationGetRelid(rel)));
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2008-01-14 10:49:47 +08:00
|
|
|
scan = systable_beginscan(indexRelation, IndexIndrelidIndexId, true,
|
Use an MVCC snapshot, rather than SnapshotNow, for catalog scans.
SnapshotNow scans have the undesirable property that, in the face of
concurrent updates, the scan can fail to see either the old or the new
versions of the row. In many cases, we work around this by requiring
DDL operations to hold AccessExclusiveLock on the object being
modified; in some cases, the existing locking is inadequate and random
failures occur as a result. This commit doesn't change anything
related to locking, but will hopefully pave the way to allowing lock
strength reductions in the future.
The major issue has held us back from making this change in the past
is that taking an MVCC snapshot is significantly more expensive than
using a static special snapshot such as SnapshotNow. However, testing
of various worst-case scenarios reveals that this problem is not
severe except under fairly extreme workloads. To mitigate those
problems, we avoid retaking the MVCC snapshot for each new scan;
instead, we take a new snapshot only when invalidation messages have
been processed. The catcache machinery already requires that
invalidation messages be sent before releasing the related heavyweight
lock; else other backends might rely on locally-cached data rather
than scanning the catalog at all. Thus, making snapshot reuse
dependent on the same guarantees shouldn't break anything that wasn't
already subtly broken.
Patch by me. Review by Michael Paquier and Andres Freund.
2013-07-02 21:47:01 +08:00
|
|
|
NULL, 1, &skey);
|
2008-01-14 10:49:47 +08:00
|
|
|
|
|
|
|
while (HeapTupleIsValid(indexTuple = systable_getnext(scan)))
|
2002-04-24 10:28:28 +08:00
|
|
|
{
|
|
|
|
Form_pg_index index = (Form_pg_index) GETSTRUCT(indexTuple);
|
|
|
|
|
2002-09-02 14:13:31 +08:00
|
|
|
/* we're only interested if it is the primary key */
|
2008-01-14 10:49:47 +08:00
|
|
|
if (index->indisprimary)
|
2002-04-24 10:28:28 +08:00
|
|
|
{
|
2018-04-08 04:00:39 +08:00
|
|
|
*indnkeyatts = index->indnkeyatts;
|
|
|
|
if (*indnkeyatts > 0)
|
2002-04-24 10:28:28 +08:00
|
|
|
{
|
2018-04-08 04:00:39 +08:00
|
|
|
result = (char **) palloc(*indnkeyatts * sizeof(char *));
|
2002-09-02 14:13:31 +08:00
|
|
|
|
2018-04-08 04:00:39 +08:00
|
|
|
for (i = 0; i < *indnkeyatts; i++)
|
2005-03-29 08:17:27 +08:00
|
|
|
result[i] = SPI_fname(tupdesc, index->indkey.values[i]);
|
2002-04-24 10:28:28 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
2008-01-14 10:49:47 +08:00
|
|
|
|
|
|
|
systable_endscan(scan);
|
2019-01-22 02:32:19 +08:00
|
|
|
table_close(indexRelation, AccessShareLock);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2005-11-18 10:38:24 +08:00
|
|
|
/*
|
|
|
|
* Deconstruct a text[] into C-strings (note any NULL elements will be
|
|
|
|
* returned as NULL pointers)
|
|
|
|
*/
|
|
|
|
static char **
|
|
|
|
get_text_array_contents(ArrayType *array, int *numitems)
|
|
|
|
{
|
|
|
|
int ndim = ARR_NDIM(array);
|
|
|
|
int *dims = ARR_DIMS(array);
|
|
|
|
int nitems;
|
|
|
|
int16 typlen;
|
|
|
|
bool typbyval;
|
|
|
|
char typalign;
|
|
|
|
char **values;
|
|
|
|
char *ptr;
|
|
|
|
bits8 *bitmap;
|
|
|
|
int bitmask;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
Assert(ARR_ELEMTYPE(array) == TEXTOID);
|
|
|
|
|
|
|
|
*numitems = nitems = ArrayGetNItems(ndim, dims);
|
|
|
|
|
|
|
|
get_typlenbyvalalign(ARR_ELEMTYPE(array),
|
|
|
|
&typlen, &typbyval, &typalign);
|
|
|
|
|
|
|
|
values = (char **) palloc(nitems * sizeof(char *));
|
|
|
|
|
|
|
|
ptr = ARR_DATA_PTR(array);
|
|
|
|
bitmap = ARR_NULLBITMAP(array);
|
|
|
|
bitmask = 1;
|
|
|
|
|
|
|
|
for (i = 0; i < nitems; i++)
|
|
|
|
{
|
|
|
|
if (bitmap && (*bitmap & bitmask) == 0)
|
|
|
|
{
|
|
|
|
values[i] = NULL;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
2008-03-26 06:42:46 +08:00
|
|
|
values[i] = TextDatumGetCString(PointerGetDatum(ptr));
|
2007-04-06 12:21:44 +08:00
|
|
|
ptr = att_addlength_pointer(ptr, typlen, ptr);
|
|
|
|
ptr = (char *) att_align_nominal(ptr, typalign);
|
2005-11-18 10:38:24 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* advance bitmap pointer if any */
|
|
|
|
if (bitmap)
|
|
|
|
{
|
|
|
|
bitmask <<= 1;
|
|
|
|
if (bitmask == 0x100)
|
|
|
|
{
|
|
|
|
bitmap++;
|
|
|
|
bitmask = 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return values;
|
|
|
|
}
|
|
|
|
|
2002-05-28 05:59:12 +08:00
|
|
|
/*
 * get_sql_insert
 *
 * Build an INSERT statement for rel that reproduces the local tuple
 * identified by src_pkattvals, except that primary-key columns take their
 * values from tgt_pkattvals.  Dropped columns are omitted entirely.
 * Errors out if no local row matches src_pkattvals.
 * Returns a palloc'd SQL string (buf.data).
 */
static char *
get_sql_insert(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals, char **tgt_pkattvals)
{
	char	   *relname;
	HeapTuple	tuple;
	TupleDesc	tupdesc;
	int			natts;
	StringInfoData buf;
	char	   *val;
	int			key;
	int			i;
	bool		needComma;

	initStringInfo(&buf);

	/* get relation name including any needed schema prefix and quoting */
	relname = generate_relation_name(rel);

	tupdesc = rel->rd_att;
	natts = tupdesc->natts;

	/* fetch the local row whose key columns equal src_pkattvals */
	tuple = get_tuple_of_interest(rel, pkattnums, pknumatts, src_pkattvals);
	if (!tuple)
		ereport(ERROR,
				(errcode(ERRCODE_CARDINALITY_VIOLATION),
				 errmsg("source row not found")));

	appendStringInfo(&buf, "INSERT INTO %s(", relname);

	/* first pass: emit the quoted column-name list, skipping dropped columns */
	needComma = false;
	for (i = 0; i < natts; i++)
	{
		Form_pg_attribute att = TupleDescAttr(tupdesc, i);

		if (att->attisdropped)
			continue;

		if (needComma)
			appendStringInfoChar(&buf, ',');

		appendStringInfoString(&buf,
							   quote_ident_cstr(NameStr(att->attname)));
		needComma = true;
	}

	appendStringInfoString(&buf, ") VALUES(");

	/*
	 * Note: i is physical column number (counting from 0).
	 */
	needComma = false;
	for (i = 0; i < natts; i++)
	{
		if (TupleDescAttr(tupdesc, i)->attisdropped)
			continue;

		if (needComma)
			appendStringInfoChar(&buf, ',');

		/* key >= 0 means column i is the key-th primary-key column */
		key = get_attnum_pk_pos(pkattnums, pknumatts, i);

		if (key >= 0)
			val = tgt_pkattvals[key] ? pstrdup(tgt_pkattvals[key]) : NULL;
		else
			val = SPI_getvalue(tuple, tupdesc, i + 1);

		if (val != NULL)
		{
			appendStringInfoString(&buf, quote_literal_cstr(val));
			pfree(val);
		}
		else
			appendStringInfoString(&buf, "NULL");
		needComma = true;
	}
	appendStringInfoChar(&buf, ')');

	return buf.data;
}
|
|
|
|
|
2002-05-28 05:59:12 +08:00
|
|
|
static char *
|
2010-06-16 00:22:19 +08:00
|
|
|
get_sql_delete(Relation rel, int *pkattnums, int pknumatts, char **tgt_pkattvals)
|
2002-04-24 10:28:28 +08:00
|
|
|
{
|
|
|
|
char *relname;
|
|
|
|
TupleDesc tupdesc;
|
2006-03-01 14:51:01 +08:00
|
|
|
StringInfoData buf;
|
2002-08-03 02:15:10 +08:00
|
|
|
int i;
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2006-03-01 14:51:01 +08:00
|
|
|
initStringInfo(&buf);
|
|
|
|
|
2002-11-24 02:59:25 +08:00
|
|
|
/* get relation name including any needed schema prefix and quoting */
|
2010-06-15 04:49:33 +08:00
|
|
|
relname = generate_relation_name(rel);
|
2002-11-24 02:59:25 +08:00
|
|
|
|
2002-04-24 10:28:28 +08:00
|
|
|
tupdesc = rel->rd_att;
|
|
|
|
|
2006-03-01 14:51:01 +08:00
|
|
|
appendStringInfo(&buf, "DELETE FROM %s WHERE ", relname);
|
2002-04-24 10:28:28 +08:00
|
|
|
for (i = 0; i < pknumatts; i++)
|
|
|
|
{
|
2010-06-16 00:22:19 +08:00
|
|
|
int pkattnum = pkattnums[i];
|
2017-08-21 02:19:07 +08:00
|
|
|
Form_pg_attribute attr = TupleDescAttr(tupdesc, pkattnum);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
if (i > 0)
|
2013-10-31 22:55:59 +08:00
|
|
|
appendStringInfoString(&buf, " AND ");
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2006-03-01 14:51:01 +08:00
|
|
|
appendStringInfoString(&buf,
|
2017-08-21 02:19:07 +08:00
|
|
|
quote_ident_cstr(NameStr(attr->attname)));
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2005-11-18 10:38:24 +08:00
|
|
|
if (tgt_pkattvals[i] != NULL)
|
2006-03-01 14:51:01 +08:00
|
|
|
appendStringInfo(&buf, " = %s",
|
2005-11-18 10:38:24 +08:00
|
|
|
quote_literal_cstr(tgt_pkattvals[i]));
|
2002-04-24 10:28:28 +08:00
|
|
|
else
|
2013-10-31 22:55:59 +08:00
|
|
|
appendStringInfoString(&buf, " IS NULL");
|
2002-04-24 10:28:28 +08:00
|
|
|
}
|
|
|
|
|
2017-08-18 00:39:20 +08:00
|
|
|
return buf.data;
|
2002-04-24 10:28:28 +08:00
|
|
|
}
|
|
|
|
|
2002-05-28 05:59:12 +08:00
|
|
|
/*
 * get_sql_update
 *
 * Build an UPDATE statement for rel: the SET list carries the values of the
 * local tuple identified by src_pkattvals (with primary-key columns replaced
 * by tgt_pkattvals), and the WHERE clause matches tgt_pkattvals.
 * Dropped columns are skipped.  Errors out if no local row matches.
 * Returns a palloc'd SQL string (buf.data).
 */
static char *
get_sql_update(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals, char **tgt_pkattvals)
{
	char	   *relname;
	HeapTuple	tuple;
	TupleDesc	tupdesc;
	int			natts;
	StringInfoData buf;
	char	   *val;
	int			key;
	int			i;
	bool		needComma;

	initStringInfo(&buf);

	/* get relation name including any needed schema prefix and quoting */
	relname = generate_relation_name(rel);

	tupdesc = rel->rd_att;
	natts = tupdesc->natts;

	/* fetch the local row whose key columns equal src_pkattvals */
	tuple = get_tuple_of_interest(rel, pkattnums, pknumatts, src_pkattvals);
	if (!tuple)
		ereport(ERROR,
				(errcode(ERRCODE_CARDINALITY_VIOLATION),
				 errmsg("source row not found")));

	appendStringInfo(&buf, "UPDATE %s SET ", relname);

	/*
	 * Note: i is physical column number (counting from 0).
	 */
	needComma = false;
	for (i = 0; i < natts; i++)
	{
		Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

		if (attr->attisdropped)
			continue;

		if (needComma)
			appendStringInfoString(&buf, ", ");

		appendStringInfo(&buf, "%s = ",
						 quote_ident_cstr(NameStr(attr->attname)));

		/* key >= 0 means column i is the key-th primary-key column */
		key = get_attnum_pk_pos(pkattnums, pknumatts, i);

		if (key >= 0)
			val = tgt_pkattvals[key] ? pstrdup(tgt_pkattvals[key]) : NULL;
		else
			val = SPI_getvalue(tuple, tupdesc, i + 1);

		if (val != NULL)
		{
			appendStringInfoString(&buf, quote_literal_cstr(val));
			pfree(val);
		}
		else
			appendStringInfoString(&buf, "NULL");
		needComma = true;
	}

	appendStringInfoString(&buf, " WHERE ");

	for (i = 0; i < pknumatts; i++)
	{
		int			pkattnum = pkattnums[i];
		Form_pg_attribute attr = TupleDescAttr(tupdesc, pkattnum);

		if (i > 0)
			appendStringInfoString(&buf, " AND ");

		appendStringInfoString(&buf,
							   quote_ident_cstr(NameStr(attr->attname)));

		val = tgt_pkattvals[i];

		if (val != NULL)
			appendStringInfo(&buf, " = %s", quote_literal_cstr(val));
		else
			appendStringInfoString(&buf, " IS NULL");
	}

	return buf.data;
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return a properly quoted identifier.
|
|
|
|
* Uses quote_ident in quote.c
|
|
|
|
*/
|
|
|
|
static char *
|
|
|
|
quote_ident_cstr(char *rawstr)
|
|
|
|
{
|
|
|
|
text *rawstr_text;
|
|
|
|
text *result_text;
|
|
|
|
char *result;
|
|
|
|
|
2008-03-26 06:42:46 +08:00
|
|
|
rawstr_text = cstring_to_text(rawstr);
|
2017-03-13 07:35:34 +08:00
|
|
|
result_text = DatumGetTextPP(DirectFunctionCall1(quote_ident,
|
2008-03-26 06:42:46 +08:00
|
|
|
PointerGetDatum(rawstr_text)));
|
|
|
|
result = text_to_cstring(result_text);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
|
|
|
|
2010-06-16 00:22:19 +08:00
|
|
|
/*
 * Return the position (0-based) of column number "key" within the
 * primary-key attribute list, or -1 if it is not a key column.
 */
static int
get_attnum_pk_pos(int *pkattnums, int pknumatts, int key)
{
	int			pos;

	/* The list is short in practice, so a plain linear scan suffices. */
	for (pos = 0; pos < pknumatts; pos++)
	{
		if (pkattnums[pos] == key)
			return pos;
	}

	return -1;
}
|
|
|
|
|
2002-05-28 05:59:12 +08:00
|
|
|
/*
 * get_tuple_of_interest
 *
 * Use SPI to fetch the single local tuple of rel whose primary-key columns
 * (pkattnums/pknumatts) equal src_pkattvals.  Returns a copied HeapTuple,
 * or NULL if no row qualifies; raises an error if more than one row does.
 */
static HeapTuple
get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pkattvals)
{
	char	   *relname;
	TupleDesc	tupdesc;
	int			natts;
	StringInfoData buf;
	int			ret;
	HeapTuple	tuple;
	int			i;

	/*
	 * Connect to SPI manager
	 */
	if ((ret = SPI_connect()) < 0)
		/* internal error */
		elog(ERROR, "SPI connect failure - returned %d", ret);

	initStringInfo(&buf);

	/* get relation name including any needed schema prefix and quoting */
	relname = generate_relation_name(rel);

	tupdesc = rel->rd_att;
	natts = tupdesc->natts;

	/*
	 * Build sql statement to look up tuple of interest, ie, the one matching
	 * src_pkattvals.  We used to use "SELECT *" here, but it's simpler to
	 * generate a result tuple that matches the table's physical structure,
	 * with NULLs for any dropped columns.  Otherwise we have to deal with two
	 * different tupdescs and everything's very confusing.
	 */
	appendStringInfoString(&buf, "SELECT ");

	for (i = 0; i < natts; i++)
	{
		Form_pg_attribute attr = TupleDescAttr(tupdesc, i);

		if (i > 0)
			appendStringInfoString(&buf, ", ");

		/* dropped columns are selected as literal NULLs to keep positions */
		if (attr->attisdropped)
			appendStringInfoString(&buf, "NULL");
		else
			appendStringInfoString(&buf,
								   quote_ident_cstr(NameStr(attr->attname)));
	}

	appendStringInfo(&buf, " FROM %s WHERE ", relname);

	for (i = 0; i < pknumatts; i++)
	{
		int			pkattnum = pkattnums[i];
		Form_pg_attribute attr = TupleDescAttr(tupdesc, pkattnum);

		if (i > 0)
			appendStringInfoString(&buf, " AND ");

		appendStringInfoString(&buf,
							   quote_ident_cstr(NameStr(attr->attname)));

		/* NULL key value must be matched with IS NULL, not "= NULL" */
		if (src_pkattvals[i] != NULL)
			appendStringInfo(&buf, " = %s",
							 quote_literal_cstr(src_pkattvals[i]));
		else
			appendStringInfoString(&buf, " IS NULL");
	}

	/*
	 * Retrieve the desired tuple
	 */
	ret = SPI_exec(buf.data, 0);
	pfree(buf.data);

	/*
	 * Only allow one qualifying tuple
	 */
	if ((ret == SPI_OK_SELECT) && (SPI_processed > 1))
		ereport(ERROR,
				(errcode(ERRCODE_CARDINALITY_VIOLATION),
				 errmsg("source criteria matched more than one record")));

	else if (ret == SPI_OK_SELECT && SPI_processed == 1)
	{
		SPITupleTable *tuptable = SPI_tuptable;

		/* copy out of SPI memory before SPI_finish frees it */
		tuple = SPI_copytuple(tuptable->vals[0]);
		SPI_finish();

		return tuple;
	}
	else
	{
		/*
		 * no qualifying tuples
		 */
		SPI_finish();

		return NULL;
	}

	/*
	 * never reached, but keep compiler quiet
	 */
	return NULL;
}
|
|
|
|
|
2010-06-15 04:49:33 +08:00
|
|
|
/*
|
|
|
|
* Open the relation named by relname_text, acquire specified type of lock,
|
|
|
|
* verify we have specified permissions.
|
|
|
|
* Caller must close rel when done with it.
|
|
|
|
*/
|
|
|
|
static Relation
|
|
|
|
get_rel_from_relname(text *relname_text, LOCKMODE lockmode, AclMode aclmode)
|
2002-04-24 10:28:28 +08:00
|
|
|
{
|
2002-05-28 05:59:12 +08:00
|
|
|
RangeVar *relvar;
|
|
|
|
Relation rel;
|
2010-06-15 04:49:33 +08:00
|
|
|
AclResult aclresult;
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2005-05-27 08:57:49 +08:00
|
|
|
relvar = makeRangeVarFromNameList(textToQualifiedNameList(relname_text));
|
2019-01-22 02:32:19 +08:00
|
|
|
rel = table_openrv(relvar, lockmode);
|
2002-04-24 10:28:28 +08:00
|
|
|
|
2010-06-15 04:49:33 +08:00
|
|
|
aclresult = pg_class_aclcheck(RelationGetRelid(rel), GetUserId(),
|
|
|
|
aclmode);
|
|
|
|
if (aclresult != ACLCHECK_OK)
|
2017-12-02 22:26:34 +08:00
|
|
|
aclcheck_error(aclresult, get_relkind_objtype(rel->rd_rel->relkind),
|
2010-06-15 04:49:33 +08:00
|
|
|
RelationGetRelationName(rel));
|
|
|
|
|
|
|
|
return rel;
|
2002-04-24 10:28:28 +08:00
|
|
|
}
|
|
|
|
|
2002-11-24 02:59:25 +08:00
|
|
|
/*
|
|
|
|
* generate_relation_name - copied from ruleutils.c
|
2010-06-15 04:49:33 +08:00
|
|
|
* Compute the name to display for a relation
|
2002-11-24 02:59:25 +08:00
|
|
|
*
|
|
|
|
* The result includes all necessary quoting and schema-prefixing.
|
|
|
|
*/
|
|
|
|
static char *
|
2010-06-15 04:49:33 +08:00
|
|
|
generate_relation_name(Relation rel)
|
2002-11-24 02:59:25 +08:00
|
|
|
{
|
|
|
|
char *nspname;
|
|
|
|
char *result;
|
|
|
|
|
|
|
|
/* Qualify the name if not visible in search path */
|
2010-06-15 04:49:33 +08:00
|
|
|
if (RelationIsVisible(RelationGetRelid(rel)))
|
2002-11-24 02:59:25 +08:00
|
|
|
nspname = NULL;
|
|
|
|
else
|
2010-06-15 04:49:33 +08:00
|
|
|
nspname = get_namespace_name(rel->rd_rel->relnamespace);
|
2002-11-24 02:59:25 +08:00
|
|
|
|
2010-06-15 04:49:33 +08:00
|
|
|
result = quote_qualified_identifier(nspname, RelationGetRelationName(rel));
|
2002-11-24 02:59:25 +08:00
|
|
|
|
|
|
|
return result;
|
|
|
|
}
|
2003-06-25 09:10:15 +08:00
|
|
|
|
|
|
|
|
|
|
|
static remoteConn *
|
|
|
|
getConnectionByName(const char *name)
|
|
|
|
{
|
|
|
|
remoteConnHashEnt *hentry;
|
2010-06-03 17:38:33 +08:00
|
|
|
char *key;
|
2003-06-25 09:10:15 +08:00
|
|
|
|
|
|
|
if (!remoteConnHash)
|
|
|
|
remoteConnHash = createConnHash();
|
|
|
|
|
2010-06-03 17:38:33 +08:00
|
|
|
key = pstrdup(name);
|
2010-11-25 18:40:58 +08:00
|
|
|
truncate_identifier(key, strlen(key), false);
|
2003-06-25 09:10:15 +08:00
|
|
|
hentry = (remoteConnHashEnt *) hash_search(remoteConnHash,
|
|
|
|
key, HASH_FIND, NULL);
|
|
|
|
|
|
|
|
if (hentry)
|
2017-08-18 00:39:20 +08:00
|
|
|
return hentry->rconn;
|
2003-06-25 09:10:15 +08:00
|
|
|
|
2017-08-18 00:39:20 +08:00
|
|
|
return NULL;
|
2003-06-25 09:10:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static HTAB *
|
|
|
|
createConnHash(void)
|
|
|
|
{
|
|
|
|
HASHCTL ctl;
|
|
|
|
|
|
|
|
ctl.keysize = NAMEDATALEN;
|
|
|
|
ctl.entrysize = sizeof(remoteConnHashEnt);
|
|
|
|
|
Improve hash_create()'s API for some added robustness.
Invent a new flag bit HASH_STRINGS to specify C-string hashing, which
was formerly the default; and add assertions insisting that exactly
one of the bits HASH_STRINGS, HASH_BLOBS, and HASH_FUNCTION be set.
This is in hopes of preventing recurrences of the type of oversight
fixed in commit a1b8aa1e4 (i.e., mistakenly omitting HASH_BLOBS).
Also, when HASH_STRINGS is specified, insist that the keysize be
more than 8 bytes. This is a heuristic, but it should catch
accidental use of HASH_STRINGS for integer or pointer keys.
(Nearly all existing use-cases set the keysize to NAMEDATALEN or
more, so there's little reason to think this restriction should
be problematic.)
Tweak hash_create() to insist that the HASH_ELEM flag be set, and
remove the defaults it had for keysize and entrysize. Since those
defaults were undocumented and basically useless, no callers
omitted HASH_ELEM anyway.
Also, remove memset's zeroing the HASHCTL parameter struct from
those callers that had one. This has never been really necessary,
and while it wasn't a bad coding convention it was confusing that
some callers did it and some did not. We might as well save a few
cycles by standardizing on "not".
Also improve the documentation for hash_create().
In passing, improve reinit.c's usage of a hash table by storing
the key as a binary Oid rather than a string; and, since that's
a temporary hash table, allocate it in CurrentMemoryContext for
neatness.
Discussion: https://postgr.es/m/590625.1607878171@sss.pgh.pa.us
2020-12-16 00:38:53 +08:00
|
|
|
return hash_create("Remote Con hash", NUMCONN, &ctl,
|
|
|
|
HASH_ELEM | HASH_STRINGS);
|
2003-06-25 09:10:15 +08:00
|
|
|
}
|
|
|
|
|
2003-07-25 01:52:50 +08:00
|
|
|
/*
 * createNewConnection
 *
 * Register rconn in the connection hash under the given name.
 * If the (truncated) name is already in use, the new libpq connection is
 * closed and an error is raised; otherwise the hash entry is filled in.
 */
static void
createNewConnection(const char *name, remoteConn *rconn)
{
	remoteConnHashEnt *hentry;
	bool		found;
	char	   *key;

	if (!remoteConnHash)
		remoteConnHash = createConnHash();

	/* truncate the key like an identifier (third arg true => notify on truncation) */
	key = pstrdup(name);
	truncate_identifier(key, strlen(key), true);
	hentry = (remoteConnHashEnt *) hash_search(remoteConnHash, key,
											   HASH_ENTER, &found);

	if (found)
	{
		/*
		 * Name collision: close the just-made connection and give back its
		 * reserved file descriptor before erroring out.
		 */
		PQfinish(rconn->conn);
		ReleaseExternalFD();
		pfree(rconn);

		ereport(ERROR,
				(errcode(ERRCODE_DUPLICATE_OBJECT),
				 errmsg("duplicate connection name")));
	}

	hentry->rconn = rconn;
	strlcpy(hentry->name, name, sizeof(hentry->name));
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
deleteConnection(const char *name)
|
|
|
|
{
|
|
|
|
remoteConnHashEnt *hentry;
|
|
|
|
bool found;
|
2010-06-03 17:38:33 +08:00
|
|
|
char *key;
|
2003-06-25 09:10:15 +08:00
|
|
|
|
|
|
|
if (!remoteConnHash)
|
|
|
|
remoteConnHash = createConnHash();
|
|
|
|
|
2010-06-03 17:38:33 +08:00
|
|
|
key = pstrdup(name);
|
2010-11-25 18:40:58 +08:00
|
|
|
truncate_identifier(key, strlen(key), false);
|
2003-06-25 09:10:15 +08:00
|
|
|
hentry = (remoteConnHashEnt *) hash_search(remoteConnHash,
|
|
|
|
key, HASH_REMOVE, &found);
|
|
|
|
|
|
|
|
if (!hentry)
|
2003-07-25 01:52:50 +08:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_UNDEFINED_OBJECT),
|
|
|
|
errmsg("undefined connection name")));
|
2003-06-25 09:10:15 +08:00
|
|
|
}
|
2008-01-04 05:27:59 +08:00
|
|
|
|
|
|
|
/*
 * dblink_security_check
 *
 * Reject a just-established connection if the caller is not a superuser and
 * the remote server authenticated without asking for a password (see the
 * errdetail below for the rationale).  On rejection, the connection is
 * closed, its reserved FD released, and rconn (if any) freed before erroring.
 */
static void
dblink_security_check(PGconn *conn, remoteConn *rconn)
{
	if (!superuser())
	{
		if (!PQconnectionUsedPassword(conn))
		{
			/* clean up before raising the error */
			PQfinish(conn);
			ReleaseExternalFD();
			if (rconn)
				pfree(rconn);

			ereport(ERROR,
					(errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
					 errmsg("password is required"),
					 errdetail("Non-superuser cannot connect if the server does not request a password."),
					 errhint("Target server's authentication method must be changed.")));
		}
	}
}
|
2008-07-03 11:56:57 +08:00
|
|
|
|
2008-09-22 21:55:14 +08:00
|
|
|
/*
 * For non-superusers, insist that the connstr specify a password.  This
 * prevents a password from being picked up from .pgpass, a service file,
 * the environment, etc.  We don't want the postgres user's passwords
 * to be accessible to non-superusers.
 */
static void
dblink_connstr_check(const char *connstr)
{
	if (!superuser())
	{
		PQconninfoOption *options;
		PQconninfoOption *option;
		bool		connstr_gives_password = false;

		/* parse the connection string and scan for a non-empty "password" */
		options = PQconninfoParse(connstr, NULL);
		if (options)
		{
			for (option = options; option->keyword != NULL; option++)
			{
				if (strcmp(option->keyword, "password") == 0)
				{
					if (option->val != NULL && option->val[0] != '\0')
					{
						connstr_gives_password = true;
						break;
					}
				}
			}
			PQconninfoFree(options);
		}

		/* unparseable connstr also lands here, which is the safe outcome */
		if (!connstr_gives_password)
			ereport(ERROR,
					(errcode(ERRCODE_S_R_E_PROHIBITED_SQL_STATEMENT_ATTEMPTED),
					 errmsg("password is required"),
					 errdetail("Non-superusers must provide a password in the connection string.")));
	}
}
|
|
|
|
|
2018-03-23 05:33:10 +08:00
|
|
|
/*
 * Report an error received from the remote server
 *
 * res: the received error result (will be freed)
 * fail: true for ERROR ereport, false for NOTICE
 * fmt and following args: sprintf-style format and values for errcontext;
 * the resulting string should be worded like "while <some action>"
 */
static void
dblink_res_error(PGconn *conn, const char *conname, PGresult *res,
				 bool fail, const char *fmt,...)
{
	int			level;
	char	   *pg_diag_sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE);
	char	   *pg_diag_message_primary = PQresultErrorField(res, PG_DIAG_MESSAGE_PRIMARY);
	char	   *pg_diag_message_detail = PQresultErrorField(res, PG_DIAG_MESSAGE_DETAIL);
	char	   *pg_diag_message_hint = PQresultErrorField(res, PG_DIAG_MESSAGE_HINT);
	char	   *pg_diag_context = PQresultErrorField(res, PG_DIAG_CONTEXT);
	int			sqlstate;
	char	   *message_primary;
	char	   *message_detail;
	char	   *message_hint;
	char	   *message_context;
	va_list		ap;
	char		dblink_context_msg[512];

	/* severity is the caller's choice */
	if (fail)
		level = ERROR;
	else
		level = NOTICE;

	/* propagate the remote SQLSTATE; default to "connection failure" */
	if (pg_diag_sqlstate)
		sqlstate = MAKE_SQLSTATE(pg_diag_sqlstate[0],
								 pg_diag_sqlstate[1],
								 pg_diag_sqlstate[2],
								 pg_diag_sqlstate[3],
								 pg_diag_sqlstate[4]);
	else
		sqlstate = ERRCODE_CONNECTION_FAILURE;

	/* copy the diagnostic strings out of libpq-owned memory */
	message_primary = xpstrdup(pg_diag_message_primary);
	message_detail = xpstrdup(pg_diag_message_detail);
	message_hint = xpstrdup(pg_diag_message_hint);
	message_context = xpstrdup(pg_diag_context);

	/*
	 * If we don't get a message from the PGresult, try the PGconn.  This is
	 * needed because for connection-level failures, PQexec may just return
	 * NULL, not a PGresult at all.
	 */
	if (message_primary == NULL)
		message_primary = pchomp(PQerrorMessage(conn));

	/*
	 * Now that we've copied all the data we need out of the PGresult, it's
	 * safe to free it.  We must do this to avoid PGresult leakage.  We're
	 * leaking all the strings too, but those are in palloc'd memory that will
	 * get cleaned up eventually.
	 */
	if (res)
		PQclear(res);

	/*
	 * Format the basic errcontext string.  Below, we'll add on something
	 * about the connection name.  That's a violation of the translatability
	 * guidelines about constructing error messages out of parts, but since
	 * there's no translation support for dblink, there's no need to worry
	 * about that (yet).
	 */
	va_start(ap, fmt);
	vsnprintf(dblink_context_msg, sizeof(dblink_context_msg), fmt, ap);
	va_end(ap);

	ereport(level,
			(errcode(sqlstate),
			 (message_primary != NULL && message_primary[0] != '\0') ?
			 errmsg_internal("%s", message_primary) :
			 errmsg("could not obtain message string for remote error"),
			 message_detail ? errdetail_internal("%s", message_detail) : 0,
			 message_hint ? errhint("%s", message_hint) : 0,
			 message_context ? (errcontext("%s", message_context)) : 0,
			 conname ?
			 (errcontext("%s on dblink connection named \"%s\"",
						 dblink_context_msg, conname)) :
			 (errcontext("%s on unnamed dblink connection",
						 dblink_context_msg))));
}
|
2009-06-07 05:27:56 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Obtain connection string for a foreign server
|
|
|
|
*/
|
|
|
|
static char *
|
|
|
|
get_connect_string(const char *servername)
|
|
|
|
{
|
|
|
|
ForeignServer *foreign_server = NULL;
|
|
|
|
UserMapping *user_mapping;
|
|
|
|
ListCell *cell;
|
2017-03-10 22:59:10 +08:00
|
|
|
StringInfoData buf;
|
2009-06-07 05:27:56 +08:00
|
|
|
ForeignDataWrapper *fdw;
|
|
|
|
AclResult aclresult;
|
2010-06-03 17:38:33 +08:00
|
|
|
char *srvname;
|
2009-06-07 05:27:56 +08:00
|
|
|
|
2016-12-23 01:19:44 +08:00
|
|
|
static const PQconninfoOption *options = NULL;
|
|
|
|
|
2017-03-10 22:59:10 +08:00
|
|
|
initStringInfo(&buf);
|
|
|
|
|
2016-12-23 01:19:44 +08:00
|
|
|
/*
|
|
|
|
* Get list of valid libpq options.
|
|
|
|
*
|
|
|
|
* To avoid unnecessary work, we get the list once and use it throughout
|
|
|
|
* the lifetime of this backend process. We don't need to care about
|
|
|
|
* memory context issues, because PQconndefaults allocates with malloc.
|
|
|
|
*/
|
|
|
|
if (!options)
|
|
|
|
{
|
|
|
|
options = PQconndefaults();
|
|
|
|
if (!options) /* assume reason for failure is OOM */
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_FDW_OUT_OF_MEMORY),
|
|
|
|
errmsg("out of memory"),
|
2018-03-23 05:33:10 +08:00
|
|
|
errdetail("Could not get libpq's default connection options.")));
|
2016-12-23 01:19:44 +08:00
|
|
|
}
|
|
|
|
|
2009-06-07 05:27:56 +08:00
|
|
|
/* first gather the server connstr options */
|
2010-06-03 17:38:33 +08:00
|
|
|
srvname = pstrdup(servername);
|
2010-06-09 11:39:26 +08:00
|
|
|
truncate_identifier(srvname, strlen(srvname), false);
|
2010-06-03 17:38:33 +08:00
|
|
|
foreign_server = GetForeignServerByName(srvname, true);
|
2009-06-07 05:27:56 +08:00
|
|
|
|
|
|
|
if (foreign_server)
|
|
|
|
{
|
|
|
|
Oid serverid = foreign_server->serverid;
|
|
|
|
Oid fdwid = foreign_server->fdwid;
|
|
|
|
Oid userid = GetUserId();
|
|
|
|
|
|
|
|
user_mapping = GetUserMapping(userid, serverid);
|
|
|
|
fdw = GetForeignDataWrapper(fdwid);
|
|
|
|
|
|
|
|
/* Check permissions, user must have usage on the server. */
|
|
|
|
aclresult = pg_foreign_server_aclcheck(serverid, userid, ACL_USAGE);
|
|
|
|
if (aclresult != ACLCHECK_OK)
|
2017-12-02 22:26:34 +08:00
|
|
|
aclcheck_error(aclresult, OBJECT_FOREIGN_SERVER, foreign_server->servername);
|
2009-06-07 05:27:56 +08:00
|
|
|
|
|
|
|
foreach(cell, fdw->options)
|
|
|
|
{
|
|
|
|
DefElem *def = lfirst(cell);
|
|
|
|
|
2016-12-23 01:19:44 +08:00
|
|
|
if (is_valid_dblink_option(options, def->defname, ForeignDataWrapperRelationId))
|
2017-03-10 22:59:10 +08:00
|
|
|
appendStringInfo(&buf, "%s='%s' ", def->defname,
|
2016-12-23 01:19:44 +08:00
|
|
|
escape_param_str(strVal(def->arg)));
|
2009-06-07 05:27:56 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
foreach(cell, foreign_server->options)
|
|
|
|
{
|
|
|
|
DefElem *def = lfirst(cell);
|
2009-06-11 22:49:15 +08:00
|
|
|
|
2016-12-23 01:19:44 +08:00
|
|
|
if (is_valid_dblink_option(options, def->defname, ForeignServerRelationId))
|
2017-03-10 22:59:10 +08:00
|
|
|
appendStringInfo(&buf, "%s='%s' ", def->defname,
|
2016-12-23 01:19:44 +08:00
|
|
|
escape_param_str(strVal(def->arg)));
|
2009-06-07 05:27:56 +08:00
|
|
|
}
|
2009-06-11 22:49:15 +08:00
|
|
|
|
2009-06-07 05:27:56 +08:00
|
|
|
foreach(cell, user_mapping->options)
|
|
|
|
{
|
2009-06-11 22:49:15 +08:00
|
|
|
|
2009-06-07 05:27:56 +08:00
|
|
|
DefElem *def = lfirst(cell);
|
2009-06-11 22:49:15 +08:00
|
|
|
|
2016-12-23 01:19:44 +08:00
|
|
|
if (is_valid_dblink_option(options, def->defname, UserMappingRelationId))
|
2017-03-10 22:59:10 +08:00
|
|
|
appendStringInfo(&buf, "%s='%s' ", def->defname,
|
2016-12-23 01:19:44 +08:00
|
|
|
escape_param_str(strVal(def->arg)));
|
2009-06-07 05:27:56 +08:00
|
|
|
}
|
|
|
|
|
2017-03-10 22:59:10 +08:00
|
|
|
return buf.data;
|
2009-06-07 05:27:56 +08:00
|
|
|
}
|
|
|
|
else
|
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Escaping libpq connect parameter strings.
|
|
|
|
*
|
|
|
|
* Replaces "'" with "\'" and "\" with "\\".
|
|
|
|
*/
|
|
|
|
static char *
|
|
|
|
escape_param_str(const char *str)
|
|
|
|
{
|
|
|
|
const char *cp;
|
2017-03-10 22:59:10 +08:00
|
|
|
StringInfoData buf;
|
|
|
|
|
|
|
|
initStringInfo(&buf);
|
2009-06-07 05:27:56 +08:00
|
|
|
|
|
|
|
for (cp = str; *cp; cp++)
|
|
|
|
{
|
|
|
|
if (*cp == '\\' || *cp == '\'')
|
2017-03-10 22:59:10 +08:00
|
|
|
appendStringInfoChar(&buf, '\\');
|
|
|
|
appendStringInfoChar(&buf, *cp);
|
2009-06-07 05:27:56 +08:00
|
|
|
}
|
|
|
|
|
2017-03-10 22:59:10 +08:00
|
|
|
return buf.data;
|
2009-06-07 05:27:56 +08:00
|
|
|
}
|
2010-02-04 07:01:11 +08:00
|
|
|
|
2010-06-16 00:22:19 +08:00
|
|
|
/*
|
|
|
|
* Validate the PK-attnums argument for dblink_build_sql_insert() and related
|
|
|
|
* functions, and translate to the internal representation.
|
|
|
|
*
|
2010-06-16 04:29:01 +08:00
|
|
|
* The user supplies an int2vector of 1-based logical attnums, plus a count
|
2010-06-16 00:22:19 +08:00
|
|
|
* argument (the need for the separate count argument is historical, but we
|
|
|
|
* still check it). We check that each attnum corresponds to a valid,
|
|
|
|
* non-dropped attribute of the rel. We do *not* prevent attnums from being
|
|
|
|
* listed twice, though the actual use-case for such things is dubious.
|
2010-06-16 04:29:01 +08:00
|
|
|
* Note that before Postgres 9.0, the user's attnums were interpreted as
|
|
|
|
* physical not logical column numbers; this was changed for future-proofing.
|
2010-06-16 00:22:19 +08:00
|
|
|
*
|
|
|
|
* The internal representation is a palloc'd int array of 0-based physical
|
|
|
|
* attnums.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
validate_pkattnums(Relation rel,
|
|
|
|
int2vector *pkattnums_arg, int32 pknumatts_arg,
|
|
|
|
int **pkattnums, int *pknumatts)
|
2010-02-04 07:01:11 +08:00
|
|
|
{
|
2010-06-16 00:22:19 +08:00
|
|
|
TupleDesc tupdesc = rel->rd_att;
|
|
|
|
int natts = tupdesc->natts;
|
2010-02-04 07:01:11 +08:00
|
|
|
int i;
|
|
|
|
|
2010-06-16 00:22:19 +08:00
|
|
|
/* Don't take more array elements than there are */
|
|
|
|
pknumatts_arg = Min(pknumatts_arg, pkattnums_arg->dim1);
|
2010-02-04 07:01:11 +08:00
|
|
|
|
2010-06-16 00:22:19 +08:00
|
|
|
/* Must have at least one pk attnum selected */
|
|
|
|
if (pknumatts_arg <= 0)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
errmsg("number of key attributes must be > 0")));
|
|
|
|
|
|
|
|
/* Allocate output array */
|
|
|
|
*pkattnums = (int *) palloc(pknumatts_arg * sizeof(int));
|
|
|
|
*pknumatts = pknumatts_arg;
|
|
|
|
|
|
|
|
/* Validate attnums and convert to internal form */
|
|
|
|
for (i = 0; i < pknumatts_arg; i++)
|
2010-02-04 07:01:11 +08:00
|
|
|
{
|
2010-06-16 00:22:19 +08:00
|
|
|
int pkattnum = pkattnums_arg->values[i];
|
2010-06-16 04:29:01 +08:00
|
|
|
int lnum;
|
|
|
|
int j;
|
2010-02-04 07:01:11 +08:00
|
|
|
|
2010-06-16 04:29:01 +08:00
|
|
|
/* Can throw error immediately if out of range */
|
|
|
|
if (pkattnum <= 0 || pkattnum > natts)
|
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
errmsg("invalid attribute number %d", pkattnum)));
|
|
|
|
|
|
|
|
/* Identify which physical column has this logical number */
|
|
|
|
lnum = 0;
|
|
|
|
for (j = 0; j < natts; j++)
|
|
|
|
{
|
|
|
|
/* dropped columns don't count */
|
2017-08-21 02:19:07 +08:00
|
|
|
if (TupleDescAttr(tupdesc, j)->attisdropped)
|
2010-06-16 04:29:01 +08:00
|
|
|
continue;
|
|
|
|
|
|
|
|
if (++lnum == pkattnum)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (j < natts)
|
|
|
|
(*pkattnums)[i] = j;
|
|
|
|
else
|
2010-06-16 00:22:19 +08:00
|
|
|
ereport(ERROR,
|
|
|
|
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
|
|
|
|
errmsg("invalid attribute number %d", pkattnum)));
|
|
|
|
}
|
2010-02-04 07:01:11 +08:00
|
|
|
}
|
2012-10-11 04:53:08 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Check if the specified connection option is valid.
|
|
|
|
*
|
|
|
|
* We basically allow whatever libpq thinks is an option, with these
|
|
|
|
* restrictions:
|
|
|
|
* debug options: disallowed
|
|
|
|
* "client_encoding": disallowed
|
|
|
|
* "user": valid only in USER MAPPING options
|
|
|
|
* secure options (eg password): valid only in USER MAPPING options
|
|
|
|
* others: valid only in FOREIGN SERVER options
|
|
|
|
*
|
|
|
|
* We disallow client_encoding because it would be overridden anyway via
|
|
|
|
* PQclientEncoding; allowing it to be specified would merely promote
|
|
|
|
* confusion.
|
|
|
|
*/
|
|
|
|
static bool
|
|
|
|
is_valid_dblink_option(const PQconninfoOption *options, const char *option,
|
|
|
|
Oid context)
|
|
|
|
{
|
|
|
|
const PQconninfoOption *opt;
|
|
|
|
|
|
|
|
/* Look up the option in libpq result */
|
|
|
|
for (opt = options; opt->keyword; opt++)
|
|
|
|
{
|
|
|
|
if (strcmp(opt->keyword, option) == 0)
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
if (opt->keyword == NULL)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* Disallow debug options (particularly "replication") */
|
|
|
|
if (strchr(opt->dispchar, 'D'))
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/* Disallow "client_encoding" */
|
|
|
|
if (strcmp(opt->keyword, "client_encoding") == 0)
|
|
|
|
return false;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the option is "user" or marked secure, it should be specified only
|
|
|
|
* in USER MAPPING. Others should be specified only in SERVER.
|
|
|
|
*/
|
|
|
|
if (strcmp(opt->keyword, "user") == 0 || strchr(opt->dispchar, '*'))
|
|
|
|
{
|
|
|
|
if (context != UserMappingRelationId)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
else
|
|
|
|
{
|
|
|
|
if (context != ForeignServerRelationId)
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2013-03-23 03:22:15 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Copy the remote session's values of GUCs that affect datatype I/O
|
|
|
|
* and apply them locally in a new GUC nesting level. Returns the new
|
|
|
|
* nestlevel (which is needed by restoreLocalGucs to undo the settings),
|
|
|
|
* or -1 if no new nestlevel was needed.
|
|
|
|
*
|
|
|
|
* We use the equivalent of a function SET option to allow the settings to
|
|
|
|
* persist only until the caller calls restoreLocalGucs. If an error is
|
|
|
|
* thrown in between, guc.c will take care of undoing the settings.
|
|
|
|
*/
|
|
|
|
static int
|
|
|
|
applyRemoteGucs(PGconn *conn)
|
|
|
|
{
|
|
|
|
static const char *const GUCsAffectingIO[] = {
|
|
|
|
"DateStyle",
|
|
|
|
"IntervalStyle"
|
|
|
|
};
|
|
|
|
|
|
|
|
int nestlevel = -1;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
for (i = 0; i < lengthof(GUCsAffectingIO); i++)
|
|
|
|
{
|
|
|
|
const char *gucName = GUCsAffectingIO[i];
|
|
|
|
const char *remoteVal = PQparameterStatus(conn, gucName);
|
|
|
|
const char *localVal;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* If the remote server is pre-8.4, it won't have IntervalStyle, but
|
|
|
|
* that's okay because its output format won't be ambiguous. So just
|
|
|
|
* skip the GUC if we don't get a value for it. (We might eventually
|
|
|
|
* need more complicated logic with remote-version checks here.)
|
|
|
|
*/
|
|
|
|
if (remoteVal == NULL)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Avoid GUC-setting overhead if the remote and local GUCs already
|
|
|
|
* have the same value.
|
|
|
|
*/
|
|
|
|
localVal = GetConfigOption(gucName, false, false);
|
|
|
|
Assert(localVal != NULL);
|
|
|
|
|
|
|
|
if (strcmp(remoteVal, localVal) == 0)
|
|
|
|
continue;
|
|
|
|
|
|
|
|
/* Create new GUC nest level if we didn't already */
|
|
|
|
if (nestlevel < 0)
|
|
|
|
nestlevel = NewGUCNestLevel();
|
|
|
|
|
|
|
|
/* Apply the option (this will throw error on failure) */
|
|
|
|
(void) set_config_option(gucName, remoteVal,
|
|
|
|
PGC_USERSET, PGC_S_SESSION,
|
2014-11-25 05:13:11 +08:00
|
|
|
GUC_ACTION_SAVE, true, 0, false);
|
2013-03-23 03:22:15 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
return nestlevel;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Restore local GUCs after they have been overlaid with remote settings.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
restoreLocalGucs(int nestlevel)
|
|
|
|
{
|
|
|
|
/* Do nothing if no new nestlevel was created */
|
|
|
|
if (nestlevel > 0)
|
|
|
|
AtEOXact_GUC(true, nestlevel);
|
|
|
|
}
|