mirror of
https://git.postgresql.org/git/postgresql.git
synced 2024-12-21 08:29:39 +08:00
8396447cdb
libpgcommon is a new static library to allow sharing code among the various frontend programs and backend; this lets us eliminate duplicate implementations of common routines. We avoid libpgport, because that's intended as a place for porting issues; per discussion, it seems better to keep them separate. The first use case, and the only implemented by this patch, is pg_malloc and friends, which many frontend programs were already using. At the same time, we can use this to provide palloc emulation functions for the frontend; this way, some palloc-using files in the backend can also be used by the frontend cleanly. To do this, we change palloc() in the backend to be a function instead of a macro on top of MemoryContextAlloc(). This was previously believed to cause loss of performance, but this implementation has been tweaked by Tom and Andres so that on modern compilers it provides a slight improvement over the previous one. This lets us clean up some places that were already with localized hacks. Most of the pg_malloc/palloc changes in this patch were authored by Andres Freund. Zoltán Böszörményi also independently provided a form of that. libpgcommon infrastructure was authored by Álvaro.
56 lines
1.5 KiB
C
56 lines
1.5 KiB
C
/*
 *	dump.c
 *
 *	dump functions
 *
 *	Copyright (c) 2010-2013, PostgreSQL Global Development Group
 *	contrib/pg_upgrade/dump.c
 */

#include "postgres_fe.h"

#include "pg_upgrade.h"

#include <sys/types.h>

void
|
|
generate_old_dump(void)
|
|
{
|
|
int dbnum;
|
|
|
|
prep_status("Creating dump of global objects");
|
|
|
|
/* run new pg_dumpall binary for globals */
|
|
exec_prog(UTILITY_LOG_FILE, NULL, true,
|
|
"\"%s/pg_dumpall\" %s --schema-only --globals-only --binary-upgrade %s -f %s",
|
|
new_cluster.bindir, cluster_conn_opts(&old_cluster),
|
|
log_opts.verbose ? "--verbose" : "",
|
|
GLOBALS_DUMP_FILE);
|
|
check_ok();
|
|
|
|
prep_status("Creating dump of database schemas\n");
|
|
|
|
/* create per-db dump files */
|
|
for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
|
|
{
|
|
char sql_file_name[MAXPGPATH], log_file_name[MAXPGPATH];
|
|
DbInfo *old_db = &old_cluster.dbarr.dbs[dbnum];
|
|
|
|
pg_log(PG_STATUS, "%s", old_db->db_name);
|
|
snprintf(sql_file_name, sizeof(sql_file_name), DB_DUMP_FILE_MASK, old_db->db_oid);
|
|
snprintf(log_file_name, sizeof(log_file_name), DB_DUMP_LOG_FILE_MASK, old_db->db_oid);
|
|
|
|
parallel_exec_prog(log_file_name, NULL,
|
|
"\"%s/pg_dump\" %s --schema-only --binary-upgrade --format=custom %s --file=\"%s\" \"%s\"",
|
|
new_cluster.bindir, cluster_conn_opts(&old_cluster),
|
|
log_opts.verbose ? "--verbose" : "", sql_file_name, old_db->db_name);
|
|
}
|
|
|
|
/* reap all children */
|
|
while (reap_child(true) == true)
|
|
;
|
|
|
|
end_progress_output();
|
|
check_ok();
|
|
}
|