In pg_upgrade, use a cached copy of the directory listing rather than calling
scandir() with a pattern for every table.

This is an optimization prompted by a report of pg_upgrade slowness with 150k tables.
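The pattern behind the change is easy to show in isolation. The stand-alone C sketch below is not pg_upgrade code: the names MapSketch, transfer_all, and free_listing are hypothetical, and it uses plain POSIX scandir() rather than pg_upgrade's pg_scandir(). It scans a directory only when the directory changes between consecutive map entries, caches the dirent list, and filters it in memory by relfilenode prefix, which is what makes ordering the relations by tablespace worthwhile.

/*
 * Hypothetical sketch of the caching strategy: scan a directory once,
 * keep the dirent list, and reuse it for every relation in that directory.
 */
#include <dirent.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct
{
	char		dir[1024];		/* directory holding the relation's files */
	unsigned int relfilenode;	/* file name prefix within that directory */
} MapSketch;					/* stand-in for pg_upgrade's FileNameMap */

static void
free_listing(struct dirent **namelist, int numFiles)
{
	int			i;

	for (i = 0; i < numFiles; i++)
		free(namelist[i]);
	free(namelist);			/* free(NULL) is a no-op */
}

static void
transfer_all(const MapSketch *maps, int size)
{
	char		cached_dir[1024] = "";
	struct dirent **namelist = NULL;
	int			numFiles = 0;
	int			mapnum,
				fileno;

	for (mapnum = 0; mapnum < size; mapnum++)
	{
		char		pattern[32];

		/* Changed directories?  Throw away the old listing and scan once. */
		if (strcmp(maps[mapnum].dir, cached_dir) != 0)
		{
			free_listing(namelist, numFiles);
			snprintf(cached_dir, sizeof(cached_dir), "%s", maps[mapnum].dir);
			numFiles = scandir(cached_dir, &namelist, NULL, NULL);
			if (numFiles < 0)
			{
				numFiles = 0;
				namelist = NULL;	/* directory unreadable; nothing to do */
			}
		}

		/* Filter the cached listing in memory instead of re-scanning disk. */
		snprintf(pattern, sizeof(pattern), "%u", maps[mapnum].relfilenode);
		for (fileno = 0; fileno < numFiles; fileno++)
			if (strncmp(namelist[fileno]->d_name, pattern, strlen(pattern)) == 0)
				printf("would transfer %s/%s\n",
					   cached_dir, namelist[fileno]->d_name);
	}

	free_listing(namelist, numFiles);
}

int
main(void)
{
	/* Two entries in the same directory: only one scandir() call is made. */
	MapSketch	maps[] = {{".", 1}, {".", 2}};

	transfer_all(maps, 2);
	return 0;
}

Because consecutive map entries now tend to share a directory, the directory is scanned roughly once per tablespace rather than once per table.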
parent 07456b45e6
commit ad06db2610
contrib/pg_upgrade/info.c

@@ -30,8 +30,8 @@ static void map_rel_by_id(Oid oldid, Oid newid,
 					 const char *old_tablespace, const DbInfo *old_db,
 					 const DbInfo *new_db, const char *olddata,
 					 const char *newdata, FileNameMap *map);
-static RelInfo *relarr_lookup_reloid(
-			RelInfoArr *rel_arr, Oid oid, Cluster whichCluster);
+static RelInfo *relarr_lookup_reloid(RelInfoArr *rel_arr,
+						Oid oid, Cluster whichCluster);
 static RelInfo *relarr_lookup_rel(RelInfoArr *rel_arr,
 					  const char *nspname, const char *relname,
 					  Cluster whichCluster);

@@ -172,8 +172,8 @@ map_rel_by_id(Oid oldid, Oid newid,
 		 * relation belongs to the default tablespace, hence relfiles would
 		 * exist in the data directories.
 		 */
-		snprintf(map->old_file, sizeof(map->old_file), "%s/base/%u", olddata, old_db->db_oid);
-		snprintf(map->new_file, sizeof(map->new_file), "%s/base/%u", newdata, new_db->db_oid);
+		snprintf(map->old_dir, sizeof(map->old_dir), "%s/base/%u", olddata, old_db->db_oid);
+		snprintf(map->new_dir, sizeof(map->new_dir), "%s/base/%u", newdata, new_db->db_oid);
 	}
 	else
 	{

@@ -181,9 +181,9 @@ map_rel_by_id(Oid oldid, Oid newid,
 		 * relation belongs to some tablespace, hence copy its physical
 		 * location
 		 */
-		snprintf(map->old_file, sizeof(map->old_file), "%s%s/%u", old_tablespace,
+		snprintf(map->old_dir, sizeof(map->old_dir), "%s%s/%u", old_tablespace,
 				 old_cluster.tablespace_suffix, old_db->db_oid);
-		snprintf(map->new_file, sizeof(map->new_file), "%s%s/%u", old_tablespace,
+		snprintf(map->new_dir, sizeof(map->new_dir), "%s%s/%u", old_tablespace,
 				 new_cluster.tablespace_suffix, new_db->db_oid);
 	}
 }

@@ -318,6 +318,7 @@ get_rel_infos(const DbInfo *dbinfo,
 	 * pg_largeobject_loid_pn_index's relfilenode can change if the table was
 	 * reindexed so we get the relfilenode for each database and migrate it as
 	 * a normal user table.
+	 * Order by tablespace so we can cache the directory contents efficiently.
 	 */

 	snprintf(query, sizeof(query),

@@ -338,7 +339,7 @@ get_rel_infos(const DbInfo *dbinfo,
 			 "GROUP BY c.oid, n.nspname, c.relname, c.relfilenode,"
 			 "	c.reltoastrelid, t.spclocation, "
 			 "	n.nspname "
-			 "ORDER BY n.nspname, c.relname;",
+			 "ORDER BY t.spclocation, n.nspname, c.relname;",
 			 FirstNormalObjectId,
 	/* see the comment at the top of old_8_3_create_sequence_script() */
 			 (GET_MAJOR_VERSION(old_cluster.major_version) <= 803) ?

contrib/pg_upgrade/pg_upgrade.h

@@ -85,10 +85,10 @@ typedef struct
  */
 typedef struct
 {
+	char		old_dir[MAXPGPATH];
+	char		new_dir[MAXPGPATH];
 	Oid			old_relfilenode;	/* Relfilenode of the old relation */
 	Oid			new_relfilenode;	/* Relfilenode of the new relation */
-	char		old_file[MAXPGPATH];
-	char		new_file[MAXPGPATH];
 	char		old_nspname[NAMEDATALEN];	/* old name of the namespace */
 	char		old_relname[NAMEDATALEN];	/* old name of the relation */
 	char		new_nspname[NAMEDATALEN];	/* new name of the namespace */

@@ -255,10 +255,8 @@ void check_old_cluster(bool live_check,
 			   char **sequence_script_file_name);
 void		check_new_cluster(void);
 void		report_clusters_compatible(void);
-void issue_warnings(
-				char *sequence_script_file_name);
-void output_completion_banner(
-				char *deletion_script_file_name);
+void		issue_warnings(char *sequence_script_file_name);
+void		output_completion_banner(char *deletion_script_file_name);
 void		check_cluster_versions(void);
 void		check_cluster_compatibility(bool live_check);
 void		create_script_for_old_cluster_deletion(char **deletion_script_file_name);

@@ -319,14 +317,12 @@ typedef void *pageCnvCtx;
 #endif

 int			dir_matching_filenames(const struct dirent * scan_ent);
-int pg_scandir(const char *dirname,
-				struct dirent *** namelist,
-				int (*selector) (const struct dirent *));
-const char *copyAndUpdateFile(
-				pageCnvCtx *pageConverter, const char *src,
+int			pg_scandir(const char *dirname, struct dirent *** namelist,
+			   int (*selector) (const struct dirent *));
+const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
 				  const char *dst, bool force);
-const char *linkAndUpdateFile(
-				pageCnvCtx *pageConverter, const char *src, const char *dst);
+const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
+				  const char *dst);

 void		check_hard_link(void);

@@ -374,8 +370,7 @@ PGresult *executeQueryOrDie(PGconn *conn,

 void		start_postmaster(Cluster whichCluster, bool quiet);
 void		stop_postmaster(bool fast, bool quiet);
-uint32 get_major_server_version(char **verstr,
-					 Cluster whichCluster);
+uint32		get_major_server_version(char **verstr, Cluster whichCluster);
 void		check_for_libpq_envvars(void);

contrib/pg_upgrade/relfilenode.c

@@ -110,20 +110,38 @@ static void
 transfer_single_new_db(pageCnvCtx *pageConverter,
 					   FileNameMap *maps, int size)
 {
+	char		old_dir[MAXPGPATH];
+	struct dirent **namelist = NULL;
+	int			numFiles = 0;
 	int			mapnum;
+	int			fileno;
+
+	old_dir[0] = '\0';

 	for (mapnum = 0; mapnum < size; mapnum++)
 	{
 		char		old_file[MAXPGPATH];
 		char		new_file[MAXPGPATH];
-		struct dirent **namelist = NULL;
-		int			numFiles;
+
+		/* Changed tablespaces?  Need a new directory scan? */
+		if (strcmp(maps[mapnum].old_dir, old_dir) != 0)
+		{
+			if (numFiles > 0)
+			{
+				for (fileno = 0; fileno < numFiles; fileno++)
+					pg_free(namelist[fileno]);
+				pg_free(namelist);
+			}
+
+			snprintf(old_dir, sizeof(old_dir), "%s", maps[mapnum].old_dir);
+			numFiles = pg_scandir(old_dir, &namelist, NULL);
+		}

 		/* Copying files might take some time, so give feedback. */

-		snprintf(old_file, sizeof(old_file), "%s/%u", maps[mapnum].old_file,
+		snprintf(old_file, sizeof(old_file), "%s/%u", maps[mapnum].old_dir,
 				 maps[mapnum].old_relfilenode);
-		snprintf(new_file, sizeof(new_file), "%s/%u", maps[mapnum].new_file,
+		snprintf(new_file, sizeof(new_file), "%s/%u", maps[mapnum].new_dir,
 				 maps[mapnum].new_relfilenode);
 		pg_log(PG_REPORT, OVERWRITE_MESSAGE, old_file);

@@ -139,28 +157,27 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
 		if (GET_MAJOR_VERSION(old_cluster.major_version) >= 804)
 		{
 			/*
-			 * Now copy/link any fsm and vm files, if they exist
+			 * Copy/link any fsm and vm files, if they exist
 			 */
 			snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u_",
 					 maps[mapnum].old_relfilenode);
-			numFiles = pg_scandir(maps[mapnum].old_file, &namelist, dir_matching_filenames);

-			while (numFiles--)
+			for (fileno = 0; fileno < numFiles; fileno++)
 			{
-				snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_file,
-						 namelist[numFiles]->d_name);
-				snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_file,
-						 maps[mapnum].new_relfilenode, strchr(namelist[numFiles]->d_name, '_'));
-
-				unlink(new_file);
-				transfer_relfile(pageConverter, old_file, new_file,
-								 maps[mapnum].old_nspname, maps[mapnum].old_relname,
-								 maps[mapnum].new_nspname, maps[mapnum].new_relname);
-
-				pg_free(namelist[numFiles]);
+				if (strncmp(namelist[fileno]->d_name, scandir_file_pattern,
+							strlen(scandir_file_pattern)) == 0)
+				{
+					snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_dir,
+							 namelist[fileno]->d_name);
+					snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_dir,
+							 maps[mapnum].new_relfilenode, strchr(namelist[fileno]->d_name, '_'));
+
+					unlink(new_file);
+					transfer_relfile(pageConverter, old_file, new_file,
+									 maps[mapnum].old_nspname, maps[mapnum].old_relname,
+									 maps[mapnum].new_nspname, maps[mapnum].new_relname);
+				}
 			}
-
-			pg_free(namelist);
 		}

 		/*

@@ -172,23 +189,30 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
 		 */
 		snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u.",
 				 maps[mapnum].old_relfilenode);
-		numFiles = pg_scandir(maps[mapnum].old_file, &namelist, dir_matching_filenames);

-		while (numFiles--)
+		for (fileno = 0; fileno < numFiles; fileno++)
 		{
-			snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_file,
-					 namelist[numFiles]->d_name);
-			snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_file,
-					 maps[mapnum].new_relfilenode, strchr(namelist[numFiles]->d_name, '.'));
-
-			unlink(new_file);
-			transfer_relfile(pageConverter, old_file, new_file,
-							 maps[mapnum].old_nspname, maps[mapnum].old_relname,
-							 maps[mapnum].new_nspname, maps[mapnum].new_relname);
-
-			pg_free(namelist[numFiles]);
+			if (strncmp(namelist[fileno]->d_name, scandir_file_pattern,
+						strlen(scandir_file_pattern)) == 0)
+			{
+				snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_dir,
+						 namelist[fileno]->d_name);
+				snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_dir,
+						 maps[mapnum].new_relfilenode, strchr(namelist[fileno]->d_name, '.'));
+
+				unlink(new_file);
+				transfer_relfile(pageConverter, old_file, new_file,
+								 maps[mapnum].old_nspname, maps[mapnum].old_relname,
+								 maps[mapnum].new_nspname, maps[mapnum].new_relname);
+			}
 		}
-
-		pg_free(namelist);
 	}
+
+	if (numFiles > 0)
+	{
+		for (fileno = 0; fileno < numFiles; fileno++)
+			pg_free(namelist[fileno]);
+		pg_free(namelist);
+	}
 }