diff --git a/contrib/basebackup_to_shell/t/001_basic.pl b/contrib/basebackup_to_shell/t/001_basic.pl index 5b1c7894c4a..6ffc89433a9 100644 --- a/contrib/basebackup_to_shell/t/001_basic.pl +++ b/contrib/basebackup_to_shell/t/001_basic.pl @@ -25,7 +25,7 @@ my $node = PostgreSQL::Test::Cluster->new('primary'); # This is only needed on Windows machines that don't use UNIX sockets. $node->init( 'allows_streaming' => 1, - 'auth_extra' => [ '--create-role', 'backupuser' ]); + 'auth_extra' => [ '--create-role' => 'backupuser' ]); $node->append_conf('postgresql.conf', "shared_preload_libraries = 'basebackup_to_shell'"); @@ -37,15 +37,19 @@ $node->safe_psql('postgres', 'CREATE ROLE trustworthy'); # to keep test times reasonable. Using @pg_basebackup_defs as the first # element of the array passed to IPC::Run interpolate the array (as it is # not a reference to an array)... -my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast'); +my @pg_basebackup_defs = + ('pg_basebackup', '--no-sync', '--checkpoint' => 'fast'); # This particular test module generally wants to run with -Xfetch, because # -Xstream is not supported with a backup target, and with -U backupuser. -my @pg_basebackup_cmd = (@pg_basebackup_defs, '-U', 'backupuser', '-Xfetch'); +my @pg_basebackup_cmd = ( + @pg_basebackup_defs, + '--username' => 'backupuser', + '--wal-method' => 'fetch'); # Can't use this module without setting basebackup_to_shell.command. $node->command_fails_like( - [ @pg_basebackup_cmd, '--target', 'shell' ], + [ @pg_basebackup_cmd, '--target' => 'shell' ], qr/shell command for backup is not configured/, 'fails if basebackup_to_shell.command is not set'); @@ -64,13 +68,13 @@ $node->reload(); # Should work now. $node->command_ok( - [ @pg_basebackup_cmd, '--target', 'shell' ], + [ @pg_basebackup_cmd, '--target' => 'shell' ], 'backup with no detail: pg_basebackup'); verify_backup('', $backup_path, "backup with no detail"); # Should fail with a detail. 
$node->command_fails_like( - [ @pg_basebackup_cmd, '--target', 'shell:foo' ], + [ @pg_basebackup_cmd, '--target' => 'shell:foo' ], qr/a target detail is not permitted because the configured command does not include %d/, 'fails if detail provided without %d'); @@ -87,19 +91,19 @@ $node->reload(); # Should fail due to lack of permission. $node->command_fails_like( - [ @pg_basebackup_cmd, '--target', 'shell' ], + [ @pg_basebackup_cmd, '--target' => 'shell' ], qr/permission denied to use basebackup_to_shell/, 'fails if required_role not granted'); # Should fail due to lack of a detail. $node->safe_psql('postgres', 'GRANT trustworthy TO backupuser'); $node->command_fails_like( - [ @pg_basebackup_cmd, '--target', 'shell' ], + [ @pg_basebackup_cmd, '--target' => 'shell' ], qr/a target detail is required because the configured command includes %d/, 'fails if %d is present and detail not given'); # Should work. -$node->command_ok([ @pg_basebackup_cmd, '--target', 'shell:bar' ], +$node->command_ok([ @pg_basebackup_cmd, '--target' => 'shell:bar' ], 'backup with detail: pg_basebackup'); verify_backup('bar.', $backup_path, "backup with detail"); @@ -133,9 +137,11 @@ sub verify_backup # Verify. 
$node->command_ok( [ - 'pg_verifybackup', '-n', - '-m', "${backup_dir}/${prefix}backup_manifest", - '-e', $extract_path + 'pg_verifybackup', + '--no-parse-wal', + '--manifest-path' => "${backup_dir}/${prefix}backup_manifest", + '--exit-on-error', + $extract_path ], "$test_name: backup verifies ok"); } diff --git a/src/bin/initdb/t/001_initdb.pl b/src/bin/initdb/t/001_initdb.pl index f114c2a1b62..5cff5ce5e4d 100644 --- a/src/bin/initdb/t/001_initdb.pl +++ b/src/bin/initdb/t/001_initdb.pl @@ -22,21 +22,19 @@ program_help_ok('initdb'); program_version_ok('initdb'); program_options_handling_ok('initdb'); -command_fails([ 'initdb', '-S', "$tempdir/nonexistent" ], +command_fails([ 'initdb', '--sync-only', "$tempdir/nonexistent" ], 'sync missing data directory'); mkdir $xlogdir; mkdir "$xlogdir/lost+found"; -command_fails( - [ 'initdb', '-X', $xlogdir, $datadir ], +command_fails([ 'initdb', '--waldir' => $xlogdir, $datadir ], 'existing nonempty xlog directory'); rmdir "$xlogdir/lost+found"; command_fails( - [ 'initdb', '-X', 'pgxlog', $datadir ], + [ 'initdb', '--waldir' => 'pgxlog', $datadir ], 'relative xlog directory not allowed'); -command_fails( - [ 'initdb', '-U', 'pg_test', $datadir ], +command_fails([ 'initdb', '--username' => 'pg_test', $datadir ], 'role names cannot begin with "pg_"'); mkdir $datadir; @@ -49,12 +47,15 @@ mkdir $datadir; local (%ENV) = %ENV; delete $ENV{TZ}; - # while we are here, also exercise -T and -c options + # while we are here, also exercise --text-search-config and --set options command_ok( [ - 'initdb', '-N', '-T', 'german', '-c', - 'default_text_search_config=german', - '-X', $xlogdir, $datadir + 'initdb', + '--no-sync', + '--text-search-config' => 'german', + '--set' => 'default_text_search_config=german', + '--waldir' => $xlogdir, + $datadir ], 'successful creation'); @@ -75,17 +76,19 @@ command_like( qr/Data page checksum version:.*1/, 'checksums are enabled in control file'); -command_ok([ 'initdb', '-S', $datadir ], 'sync only'); 
+command_ok([ 'initdb', '--sync-only', $datadir ], 'sync only'); command_fails([ 'initdb', $datadir ], 'existing data directory'); if ($supports_syncfs) { - command_ok([ 'initdb', '-S', $datadir, '--sync-method', 'syncfs' ], + command_ok( + [ 'initdb', '--sync-only', $datadir, '--sync-method' => 'syncfs' ], 'sync method syncfs'); } else { - command_fails([ 'initdb', '-S', $datadir, '--sync-method', 'syncfs' ], + command_fails( + [ 'initdb', '--sync-only', $datadir, '--sync-method' => 'syncfs' ], 'sync method syncfs'); } @@ -126,7 +129,7 @@ if ($ENV{with_icu} eq 'yes') command_like( [ 'initdb', '--no-sync', - '-A', 'trust', + '-A' => 'trust', '--locale-provider=icu', '--locale=und', '--lc-collate=C', '--lc-ctype=C', '--lc-messages=C', '--lc-numeric=C', @@ -246,7 +249,8 @@ command_fails( ], 'fails for invalid option combination'); -command_fails([ 'initdb', '--no-sync', '--set', 'foo=bar', "$tempdir/dataX" ], +command_fails( + [ 'initdb', '--no-sync', '--set' => 'foo=bar', "$tempdir/dataX" ], 'fails for invalid --set option'); # Make sure multiple invocations of -c parameters are added case insensitive @@ -279,7 +283,7 @@ command_like( # not part of the tests included in pg_checksums to save from # the creation of an extra instance. command_fails( - [ 'pg_checksums', '-D', $datadir_nochecksums ], + [ 'pg_checksums', '--pgdata' => $datadir_nochecksums ], "pg_checksums fails with data checksum disabled"); done_testing(); diff --git a/src/bin/pg_amcheck/t/002_nonesuch.pl b/src/bin/pg_amcheck/t/002_nonesuch.pl index 5a7377e627e..f6011d000b7 100644 --- a/src/bin/pg_amcheck/t/002_nonesuch.pl +++ b/src/bin/pg_amcheck/t/002_nonesuch.pl @@ -30,7 +30,7 @@ $node->command_checks_all( # Failing to resolve a database pattern is an error by default. 
$node->command_checks_all( - [ 'pg_amcheck', '-d', 'qqq', '-d', 'postgres' ], + [ 'pg_amcheck', '--database' => 'qqq', '--database' => 'postgres' ], 1, [qr/^$/], [qr/pg_amcheck: error: no connectable databases to check matching "qqq"/], @@ -38,7 +38,12 @@ $node->command_checks_all( # But only a warning under --no-strict-names $node->command_checks_all( - [ 'pg_amcheck', '--no-strict-names', '-d', 'qqq', '-d', 'postgres' ], + [ + 'pg_amcheck', + '--no-strict-names', + '--database' => 'qqq', + '--database' => 'postgres' + ], 0, [qr/^$/], [ @@ -49,7 +54,7 @@ $node->command_checks_all( # Check that a substring of an existent database name does not get interpreted # as a matching pattern. $node->command_checks_all( - [ 'pg_amcheck', '-d', 'post', '-d', 'postgres' ], + [ 'pg_amcheck', '--database' => 'post', '--database' => 'postgres' ], 1, [qr/^$/], [ @@ -61,7 +66,11 @@ $node->command_checks_all( # Check that a superstring of an existent database name does not get interpreted # as a matching pattern. $node->command_checks_all( - [ 'pg_amcheck', '-d', 'postgresql', '-d', 'postgres' ], + [ + 'pg_amcheck', + '--database' => 'postgresql', + '--database' => 'postgres' + ], 1, [qr/^$/], [ @@ -74,7 +83,8 @@ $node->command_checks_all( # Test connecting with a non-existent user # Failing to connect to the initial database due to bad username is an error. -$node->command_checks_all([ 'pg_amcheck', '-U', 'no_such_user', 'postgres' ], +$node->command_checks_all( + [ 'pg_amcheck', '--username' => 'no_such_user', 'postgres' ], 1, [qr/^$/], [], 'checking with a non-existent user'); ######################################### @@ -96,7 +106,7 @@ $node->command_checks_all( # Again, but this time with another database to check, so no error is raised. 
$node->command_checks_all( - [ 'pg_amcheck', '-d', 'template1', '-d', 'postgres' ], + [ 'pg_amcheck', '--database' => 'template1', '--database' => 'postgres' ], 0, [qr/^$/], [ @@ -121,7 +131,7 @@ $node->command_checks_all( # Check three-part unreasonable pattern that has zero-length names $node->command_checks_all( - [ 'pg_amcheck', '-d', 'postgres', '-t', '..' ], + [ 'pg_amcheck', '--database' => 'postgres', '--table' => '..' ], 1, [qr/^$/], [ @@ -131,7 +141,7 @@ $node->command_checks_all( # Again, but with non-trivial schema and relation parts $node->command_checks_all( - [ 'pg_amcheck', '-d', 'postgres', '-t', '.foo.bar' ], + [ 'pg_amcheck', '--database' => 'postgres', '--table' => '.foo.bar' ], 1, [qr/^$/], [ @@ -141,7 +151,7 @@ $node->command_checks_all( # Check two-part unreasonable pattern that has zero-length names $node->command_checks_all( - [ 'pg_amcheck', '-d', 'postgres', '-t', '.' ], + [ 'pg_amcheck', '--database' => 'postgres', '--table' => '.' ], 1, [qr/^$/], [qr/pg_amcheck: error: no heap tables to check matching "\."/], @@ -149,7 +159,7 @@ $node->command_checks_all( # Check that a multipart database name is rejected $node->command_checks_all( - [ 'pg_amcheck', '-d', 'localhost.postgres' ], + [ 'pg_amcheck', '--database' => 'localhost.postgres' ], 2, [qr/^$/], [ @@ -159,7 +169,7 @@ $node->command_checks_all( # Check that a three-part schema name is rejected $node->command_checks_all( - [ 'pg_amcheck', '-s', 'localhost.postgres.pg_catalog' ], + [ 'pg_amcheck', '--schema' => 'localhost.postgres.pg_catalog' ], 2, [qr/^$/], [ @@ -169,7 +179,7 @@ $node->command_checks_all( # Check that a four-part table name is rejected $node->command_checks_all( - [ 'pg_amcheck', '-t', 'localhost.postgres.pg_catalog.pg_class' ], + [ 'pg_amcheck', '--table' => 'localhost.postgres.pg_catalog.pg_class' ], 2, [qr/^$/], [ @@ -183,7 +193,7 @@ $node->command_checks_all( $node->command_checks_all( [ 'pg_amcheck', '--no-strict-names', - '-t', 
'this.is.a.really.long.dotted.string' + '--table' => 'this.is.a.really.long.dotted.string' ], 2, [qr/^$/], @@ -193,8 +203,8 @@ $node->command_checks_all( 'ungrammatical table names still draw errors under --no-strict-names'); $node->command_checks_all( [ - 'pg_amcheck', '--no-strict-names', '-s', - 'postgres.long.dotted.string' + 'pg_amcheck', '--no-strict-names', + '--schema' => 'postgres.long.dotted.string' ], 2, [qr/^$/], @@ -204,8 +214,8 @@ $node->command_checks_all( 'ungrammatical schema names still draw errors under --no-strict-names'); $node->command_checks_all( [ - 'pg_amcheck', '--no-strict-names', '-d', - 'postgres.long.dotted.string' + 'pg_amcheck', '--no-strict-names', + '--database' => 'postgres.long.dotted.string' ], 2, [qr/^$/], @@ -216,7 +226,7 @@ $node->command_checks_all( # Likewise for exclusion patterns $node->command_checks_all( - [ 'pg_amcheck', '--no-strict-names', '-T', 'a.b.c.d' ], + [ 'pg_amcheck', '--no-strict-names', '--exclude-table' => 'a.b.c.d' ], 2, [qr/^$/], [ @@ -225,7 +235,7 @@ $node->command_checks_all( 'ungrammatical table exclusions still draw errors under --no-strict-names' ); $node->command_checks_all( - [ 'pg_amcheck', '--no-strict-names', '-S', 'a.b.c' ], + [ 'pg_amcheck', '--no-strict-names', '--exclude-schema' => 'a.b.c' ], 2, [qr/^$/], [ @@ -234,7 +244,7 @@ $node->command_checks_all( 'ungrammatical schema exclusions still draw errors under --no-strict-names' ); $node->command_checks_all( - [ 'pg_amcheck', '--no-strict-names', '-D', 'a.b' ], + [ 'pg_amcheck', '--no-strict-names', '--exclude-database' => 'a.b' ], 2, [qr/^$/], [ @@ -252,20 +262,20 @@ $node->command_checks_all( $node->command_checks_all( [ 'pg_amcheck', '--no-strict-names', - '-t', 'no_such_table', - '-t', 'no*such*table', - '-i', 'no_such_index', - '-i', 'no*such*index', - '-r', 'no_such_relation', - '-r', 'no*such*relation', - '-d', 'no_such_database', - '-d', 'no*such*database', - '-r', 'none.none', - '-r', 'none.none.none', - '-r', 'postgres.none.none', 
- '-r', 'postgres.pg_catalog.none', - '-r', 'postgres.none.pg_class', - '-t', 'postgres.pg_catalog.pg_class', # This exists + '--table' => 'no_such_table', + '--table' => 'no*such*table', + '--index' => 'no_such_index', + '--index' => 'no*such*index', + '--relation' => 'no_such_relation', + '--relation' => 'no*such*relation', + '--database' => 'no_such_database', + '--database' => 'no*such*database', + '--relation' => 'none.none', + '--relation' => 'none.none.none', + '--relation' => 'postgres.none.none', + '--relation' => 'postgres.pg_catalog.none', + '--relation' => 'postgres.none.pg_class', + '--table' => 'postgres.pg_catalog.pg_class', # This exists ], 0, [qr/^$/], @@ -302,7 +312,7 @@ $node->safe_psql( )); $node->command_checks_all( - [ 'pg_amcheck', '-d', 'regression_invalid' ], + [ 'pg_amcheck', '--database' => 'regression_invalid' ], 1, [qr/^$/], [ @@ -312,7 +322,9 @@ $node->command_checks_all( $node->command_checks_all( [ - 'pg_amcheck', '-d', 'postgres', '-t', 'regression_invalid.public.foo', + 'pg_amcheck', + '--database' => 'postgres', + '--table' => 'regression_invalid.public.foo', ], 1, [qr/^$/], @@ -334,14 +346,15 @@ $node->safe_psql('postgres', q(CREATE DATABASE another_db)); $node->command_checks_all( [ - 'pg_amcheck', '-d', - 'postgres', '--no-strict-names', - '-t', 'template1.public.foo', - '-t', 'another_db.public.foo', - '-t', 'no_such_database.public.foo', - '-i', 'template1.public.foo_idx', - '-i', 'another_db.public.foo_idx', - '-i', 'no_such_database.public.foo_idx', + 'pg_amcheck', + '--database' => 'postgres', + '--no-strict-names', + '--table' => 'template1.public.foo', + '--table' => 'another_db.public.foo', + '--table' => 'no_such_database.public.foo', + '--index' => 'template1.public.foo_idx', + '--index' => 'another_db.public.foo_idx', + '--index' => 'no_such_database.public.foo_idx', ], 1, [qr/^$/], @@ -364,9 +377,13 @@ $node->command_checks_all( # Check with only schema exclusion patterns $node->command_checks_all( [ - 'pg_amcheck', 
'--all', '--no-strict-names', '-S', - 'public', '-S', 'pg_catalog', '-S', - 'pg_toast', '-S', 'information_schema', + 'pg_amcheck', + '--all', + '--no-strict-names', + '--exclude-schema' => 'public', + '--exclude-schema' => 'pg_catalog', + '--exclude-schema' => 'pg_toast', + '--exclude-schema' => 'information_schema', ], 1, [qr/^$/], @@ -379,10 +396,15 @@ $node->command_checks_all( # Check with schema exclusion patterns overriding relation and schema inclusion patterns $node->command_checks_all( [ - 'pg_amcheck', '--all', '--no-strict-names', '-s', - 'public', '-s', 'pg_catalog', '-s', - 'pg_toast', '-s', 'information_schema', '-t', - 'pg_catalog.pg_class', '-S*' + 'pg_amcheck', + '--all', + '--no-strict-names', + '--schema' => 'public', + '--schema' => 'pg_catalog', + '--schema' => 'pg_toast', + '--schema' => 'information_schema', + '--table' => 'pg_catalog.pg_class', + '--exclude-schema' => '*' ], 1, [qr/^$/], diff --git a/src/bin/pg_amcheck/t/003_check.pl b/src/bin/pg_amcheck/t/003_check.pl index fe9122370ff..881854da254 100644 --- a/src/bin/pg_amcheck/t/003_check.pl +++ b/src/bin/pg_amcheck/t/003_check.pl @@ -319,7 +319,7 @@ plan_to_remove_relation_file('db2', 's1.t1_btree'); # # Standard first arguments to PostgreSQL::Test::Utils functions -my @cmd = ('pg_amcheck', '-p', $port); +my @cmd = ('pg_amcheck', '--port' => $port); # Regular expressions to match various expected output my $no_output_re = qr/^$/; @@ -332,8 +332,17 @@ my $index_missing_relation_fork_re = # yet corrupted anything. 
As such, we expect no corruption and verify that # none is reported # -$node->command_checks_all([ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ], - 0, [$no_output_re], [$no_output_re], 'pg_amcheck prior to corruption'); +$node->command_checks_all( + [ + @cmd, + '--database' => 'db1', + '--database' => 'db2', + '--database' => 'db3' + ], + 0, + [$no_output_re], + [$no_output_re], + 'pg_amcheck prior to corruption'); # Perform the corruptions we planned above using only a single database restart. # @@ -356,7 +365,12 @@ $node->command_checks_all( 'pg_amcheck all schemas, tables and indexes in database db1'); $node->command_checks_all( - [ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3' ], + [ + @cmd, + '--database' => 'db1', + '--database' => 'db2', + '--database' => 'db3' + ], 2, [ $index_missing_relation_fork_re, $line_pointer_corruption_re, @@ -376,7 +390,7 @@ $node->command_checks_all( # complaint on stderr, but otherwise stderr should be quiet. # $node->command_checks_all( - [ @cmd, '--all', '-s', 's1', '-i', 't1_btree' ], + [ @cmd, '--all', '--schema' => 's1', '--index' => 't1_btree' ], 2, [$index_missing_relation_fork_re], [ @@ -385,7 +399,12 @@ $node->command_checks_all( 'pg_amcheck index s1.t1_btree reports missing main relation fork'); $node->command_checks_all( - [ @cmd, '-d', 'db1', '-s', 's1', '-i', 't2_btree' ], + [ + @cmd, + '--database' => 'db1', + '--schema' => 's1', + '--index' => 't2_btree' + ], 2, [qr/.+/], # Any non-empty error message is acceptable [$no_output_re], @@ -396,22 +415,24 @@ $node->command_checks_all( # are quiet. 
# $node->command_checks_all( - [ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db1' ], + [ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db1' ], 0, [$no_output_re], [$no_output_re], 'pg_amcheck of db1.s1 excluding indexes'); # Checking db2.s1 should show table corruptions if indexes are excluded # $node->command_checks_all( - [ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db2' ], - 2, [$missing_file_re], [$no_output_re], + [ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db2' ], + 2, + [$missing_file_re], + [$no_output_re], 'pg_amcheck of db2.s1 excluding indexes'); # In schema db1.s3, the tables and indexes are both corrupt. We should see # corruption messages on stdout, and nothing on stderr. # $node->command_checks_all( - [ @cmd, '-s', 's3', 'db1' ], + [ @cmd, '--schema' => 's3', 'db1' ], 2, [ $index_missing_relation_fork_re, $line_pointer_corruption_re, @@ -423,13 +444,16 @@ $node->command_checks_all( # In schema db1.s4, only toast tables are corrupt. Check that under default # options the toast corruption is reported, but when excluding toast we get no # error reports. 
-$node->command_checks_all([ @cmd, '-s', 's4', 'db1' ], +$node->command_checks_all([ @cmd, '--schema' => 's4', 'db1' ], 2, [$missing_file_re], [$no_output_re], 'pg_amcheck in schema s4 reports toast corruption'); $node->command_checks_all( [ - @cmd, '--no-dependent-toast', '--exclude-toast-pointers', '-s', 's4', + @cmd, + '--no-dependent-toast', + '--exclude-toast-pointers', + '--schema' => 's4', 'db1' ], 0, @@ -438,7 +462,7 @@ $node->command_checks_all( 'pg_amcheck in schema s4 excluding toast reports no corruption'); # Check that no corruption is reported in schema db1.s5 -$node->command_checks_all([ @cmd, '-s', 's5', 'db1' ], +$node->command_checks_all([ @cmd, '--schema' => 's5', 'db1' ], 0, [$no_output_re], [$no_output_re], 'pg_amcheck over schema s5 reports no corruption'); @@ -446,7 +470,13 @@ $node->command_checks_all([ @cmd, '-s', 's5', 'db1' ], # the indexes, no corruption is reported about the schema. # $node->command_checks_all( - [ @cmd, '-s', 's1', '-I', 't1_btree', '-I', 't2_btree', 'db1' ], + [ + @cmd, + '--schema' => 's1', + '--exclude-index' => 't1_btree', + '--exclude-index' => 't2_btree', + 'db1' + ], 0, [$no_output_re], [$no_output_re], @@ -458,7 +488,7 @@ $node->command_checks_all( # about the schema. # $node->command_checks_all( - [ @cmd, '-t', 's1.*', '--no-dependent-indexes', 'db1' ], + [ @cmd, '--table' => 's1.*', '--no-dependent-indexes', 'db1' ], 0, [$no_output_re], [$no_output_re], @@ -469,7 +499,13 @@ $node->command_checks_all( # tables that no corruption is reported. # $node->command_checks_all( - [ @cmd, '-s', 's2', '-T', 't1', '-T', 't2', 'db1' ], + [ + @cmd, + '--schema' => 's2', + '--exclude-table' => 't1', + '--exclude-table' => 't2', + 'db1' + ], 0, [$no_output_re], [$no_output_re], @@ -480,17 +516,23 @@ $node->command_checks_all( # to avoid getting messages about corrupt tables or indexes. 
# command_fails_like( - [ @cmd, '-s', 's5', '--startblock', 'junk', 'db1' ], + [ @cmd, '--schema' => 's5', '--startblock' => 'junk', 'db1' ], qr/invalid start block/, 'pg_amcheck rejects garbage startblock'); command_fails_like( - [ @cmd, '-s', 's5', '--endblock', '1234junk', 'db1' ], + [ @cmd, '--schema' => 's5', '--endblock' => '1234junk', 'db1' ], qr/invalid end block/, 'pg_amcheck rejects garbage endblock'); command_fails_like( - [ @cmd, '-s', 's5', '--startblock', '5', '--endblock', '4', 'db1' ], + [ + @cmd, + '--schema' => 's5', + '--startblock' => '5', + '--endblock' => '4', + 'db1' + ], qr/end block precedes start block/, 'pg_amcheck rejects invalid block range'); @@ -499,7 +541,12 @@ command_fails_like( # arguments are handled sensibly. # $node->command_checks_all( - [ @cmd, '-s', 's1', '-i', 't1_btree', '--parent-check', 'db1' ], + [ + @cmd, + '--schema' => 's1', + '--index' => 't1_btree', + '--parent-check', 'db1' + ], 2, [$index_missing_relation_fork_re], [$no_output_re], @@ -507,7 +554,10 @@ $node->command_checks_all( $node->command_checks_all( [ - @cmd, '-s', 's1', '-i', 't1_btree', '--heapallindexed', + @cmd, + '--schema' => 's1', + '--index' => 't1_btree', + '--heapallindexed', '--rootdescend', 'db1' ], 2, @@ -516,13 +566,24 @@ $node->command_checks_all( 'pg_amcheck smoke test --heapallindexed --rootdescend'); $node->command_checks_all( - [ @cmd, '-d', 'db1', '-d', 'db2', '-d', 'db3', '-S', 's*' ], - 0, [$no_output_re], [$no_output_re], + [ + @cmd, + '--database' => 'db1', + '--database' => 'db2', + '--database' => 'db3', + '--exclude-schema' => 's*' + ], + 0, + [$no_output_re], + [$no_output_re], 'pg_amcheck excluding all corrupt schemas'); $node->command_checks_all( [ - @cmd, '-s', 's1', '-i', 't1_btree', '--parent-check', + @cmd, + '--schema' => 's1', + '--index' => 't1_btree', + '--parent-check', '--checkunique', 'db1' ], 2, @@ -532,7 +593,10 @@ $node->command_checks_all( $node->command_checks_all( [ - @cmd, '-s', 's1', '-i', 't1_btree', 
'--heapallindexed', + @cmd, + '--schema' => 's1', + '--index' => 't1_btree', + '--heapallindexed', '--rootdescend', '--checkunique', 'db1' ], 2, @@ -542,8 +606,12 @@ $node->command_checks_all( $node->command_checks_all( [ - @cmd, '--checkunique', '-d', 'db1', '-d', 'db2', - '-d', 'db3', '-S', 's*' + @cmd, + '--checkunique', + '--database' => 'db1', + '--database' => 'db2', + '--database' => 'db3', + '--exclude-schema' => 's*' ], 0, [$no_output_re], diff --git a/src/bin/pg_amcheck/t/004_verify_heapam.pl b/src/bin/pg_amcheck/t/004_verify_heapam.pl index 541bdeec99b..2a3af2666f5 100644 --- a/src/bin/pg_amcheck/t/004_verify_heapam.pl +++ b/src/bin/pg_amcheck/t/004_verify_heapam.pl @@ -386,11 +386,12 @@ $node->start; # Check that pg_amcheck runs against the uncorrupted table without error. $node->command_ok( - [ 'pg_amcheck', '-p', $port, 'postgres' ], + [ 'pg_amcheck', '--port' => $port, 'postgres' ], 'pg_amcheck test table, prior to corruption'); # Check that pg_amcheck runs against the uncorrupted table and index without error. 
-$node->command_ok([ 'pg_amcheck', '-p', $port, 'postgres' ], +$node->command_ok( + [ 'pg_amcheck', '--port' => $port, 'postgres' ], 'pg_amcheck test table and index, prior to corruption'); $node->stop; @@ -754,7 +755,7 @@ $node->start; # Run pg_amcheck against the corrupt table with epoch=0, comparing actual # corruption messages against the expected messages $node->command_checks_all( - [ 'pg_amcheck', '--no-dependent-indexes', '-p', $port, 'postgres' ], + [ 'pg_amcheck', '--no-dependent-indexes', '--port' => $port, 'postgres' ], 2, [@expected], [], 'Expected corruption message output'); $node->safe_psql( 'postgres', qq( diff --git a/src/bin/pg_amcheck/t/005_opclass_damage.pl b/src/bin/pg_amcheck/t/005_opclass_damage.pl index 794e9ca2a3a..775014aabdc 100644 --- a/src/bin/pg_amcheck/t/005_opclass_damage.pl +++ b/src/bin/pg_amcheck/t/005_opclass_damage.pl @@ -52,7 +52,7 @@ $node->safe_psql( )); # We have not yet broken the index, so we should get no corruption -$node->command_like([ 'pg_amcheck', '-p', $node->port, 'postgres' ], +$node->command_like([ 'pg_amcheck', '--port' => $node->port, 'postgres' ], qr/^$/, 'pg_amcheck all schemas, tables and indexes reports no corruption'); @@ -69,7 +69,7 @@ $node->safe_psql( # Index corruption should now be reported $node->command_checks_all( - [ 'pg_amcheck', '-p', $node->port, 'postgres' ], + [ 'pg_amcheck', '--port' => $node->port, 'postgres' ], 2, [qr/item order invariant violated for index "fickleidx"/], [], @@ -90,7 +90,7 @@ $node->safe_psql( # We should get no corruptions $node->command_like( - [ 'pg_amcheck', '--checkunique', '-p', $node->port, 'postgres' ], + [ 'pg_amcheck', '--checkunique', '--port' => $node->port, 'postgres' ], qr/^$/, 'pg_amcheck all schemas, tables and indexes reports no corruption'); @@ -116,7 +116,7 @@ $node->safe_psql( # Unique index corruption should now be reported $node->command_checks_all( - [ 'pg_amcheck', '--checkunique', '-p', $node->port, 'postgres' ], + [ 'pg_amcheck', 
'--checkunique', '--port' => $node->port, 'postgres' ], 2, [qr/index uniqueness is violated for index "bttest_unique_idx"/], [], diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl index 9ef9f65dd70..81c82cf712d 100644 --- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl +++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl @@ -31,7 +31,7 @@ umask(0077); # Initialize node without replication settings $node->init( extra => ['--data-checksums'], - auth_extra => [ '--create-role', 'backupuser' ]); + auth_extra => [ '--create-role' => 'backupuser' ]); $node->start; my $pgdata = $node->data_dir; @@ -40,11 +40,19 @@ $node->command_fails(['pg_basebackup'], # Sanity checks for options $node->command_fails_like( - [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'none:1' ], + [ + 'pg_basebackup', + '--pgdata' => "$tempdir/backup", + '--compress' => 'none:1' + ], qr/\Qcompression algorithm "none" does not accept a compression level/, 'failure if method "none" specified with compression level'); $node->command_fails_like( - [ 'pg_basebackup', '-D', "$tempdir/backup", '--compress', 'none+' ], + [ + 'pg_basebackup', + '--pgdata' => "$tempdir/backup", + '--compress' => 'none+' + ], qr/\Qunrecognized compression algorithm: "none+"/, 'failure on incorrect separator to define compression level'); @@ -60,7 +68,7 @@ $node->set_replication_conf(); $node->reload; $node->command_fails( - [ @pg_basebackup_defs, '-D', "$tempdir/backup" ], + [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup" ], 'pg_basebackup fails because of WAL configuration'); ok(!-d "$tempdir/backup", 'backup directory was cleaned up'); @@ -71,7 +79,8 @@ mkdir("$tempdir/backup") or BAIL_OUT("unable to create $tempdir/backup"); append_to_file("$tempdir/backup/dir-not-empty.txt", "Some data"); -$node->command_fails([ @pg_basebackup_defs, '-D', "$tempdir/backup", '-n' ], +$node->command_fails( + [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup", '-n' ], 
'failing run with no-clean option'); ok(-d "$tempdir/backup", 'backup directory was created and left behind'); @@ -153,17 +162,17 @@ SKIP: my $sfail = quotemeta($server_fails . $cft->[1]); $node->command_fails_like( [ - 'pg_basebackup', '-D', - "$tempdir/backup", '--compress', - $cft->[0] + 'pg_basebackup', + '--pgdata' => "$tempdir/backup", + '--compress' => $cft->[0], ], qr/$cfail/, 'client ' . $cft->[2]); $node->command_fails_like( [ - 'pg_basebackup', '-D', - "$tempdir/backup", '--compress', - 'server-' . $cft->[0] + 'pg_basebackup', + '--pgdata' => "$tempdir/backup", + '--compress' => 'server-' . $cft->[0], ], qr/$sfail/, 'server ' . $cft->[2]); @@ -219,7 +228,11 @@ foreach my $filename (@tempRelationFiles) # Run base backup. $node->command_ok( - [ @pg_basebackup_defs, '-D', "$tempdir/backup", '-X', 'none' ], + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup", + '--wal-method' => 'none' + ], 'pg_basebackup runs'); ok(-f "$tempdir/backup/PG_VERSION", 'backup was created'); ok(-f "$tempdir/backup/backup_manifest", 'backup manifest included'); @@ -289,9 +302,10 @@ unlink("$pgdata/backup_label") $node->command_ok( [ - @pg_basebackup_defs, '-D', - "$tempdir/backup2", '--no-manifest', - '--waldir', "$tempdir/xlog2" + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup2", + '--no-manifest', + '--waldir' => "$tempdir/xlog2" ], 'separate xlog directory'); ok(-f "$tempdir/backup2/PG_VERSION", 'backup was created'); @@ -300,32 +314,64 @@ ok(-d "$tempdir/xlog2/", 'xlog directory was created'); rmtree("$tempdir/backup2"); rmtree("$tempdir/xlog2"); -$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/tarbackup", '-Ft' ], +$node->command_ok( + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/tarbackup", + '--format' => 'tar' + ], 'tar format'); ok(-f "$tempdir/tarbackup/base.tar", 'backup tar was created'); rmtree("$tempdir/tarbackup"); $node->command_fails( - [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T=/foo" ], - '-T with empty old 
directory fails'); -$node->command_fails( - [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=" ], - '-T with empty new directory fails'); + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_foo", + '--format' => 'plain', + '--tablespace-mapping' => '=/foo' + ], + '--tablespace-mapping with empty old directory fails'); $node->command_fails( [ - @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', - "-T/foo=/bar=/baz" + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_foo", + '--format' => 'plain', + '--tablespace-mapping' => '/foo=' ], - '-T with multiple = fails'); + '--tablespace-mapping with empty new directory fails'); $node->command_fails( - [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo=/bar" ], - '-T with old directory not absolute fails'); + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_foo", + '--format' => 'plain', + '--tablespace-mapping' => '/foo=/bar=/baz' + ], + '--tablespace-mapping with multiple = fails'); $node->command_fails( - [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-T/foo=bar" ], - '-T with new directory not absolute fails'); + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_foo", + '--format' => 'plain', + '--tablespace-mapping' => 'foo=/bar' + ], + '--tablespace-mapping with old directory not absolute fails'); $node->command_fails( - [ @pg_basebackup_defs, '-D', "$tempdir/backup_foo", '-Fp', "-Tfoo" ], - '-T with invalid format fails'); + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_foo", + '--format' => 'plain', + '--tablespace-mapping' => '/foo=bar' + ], + '--tablespace-mapping with new directory not absolute fails'); +$node->command_fails( + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_foo", + '--format' => 'plain', + '--tablespace-mapping' => 'foo' + ], + '--tablespace-mapping with invalid format fails'); my $superlongname = "superlongname_" . ("x" x 100); # Tar format doesn't support filenames longer than 100 bytes. 
@@ -340,7 +386,11 @@ SKIP: or die "unable to create file $superlongpath"; close $file; $node->command_fails( - [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l1", '-Ft' ], + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/tarbackup_l1", + '--format' => 'tar' + ], 'pg_basebackup tar with long name fails'); unlink "$superlongpath"; } @@ -384,7 +434,7 @@ $node->safe_psql('postgres', $node->safe_psql('postgres', "CREATE TABLE test1 (a int) TABLESPACE tblspc1;" . "INSERT INTO test1 VALUES (1234);"); -$node->backup('tarbackup2', backup_options => ['-Ft']); +$node->backup('tarbackup2', backup_options => [ '--format' => 'tar' ]); # empty test1, just so that it's different from the to-be-restored data $node->safe_psql('postgres', "TRUNCATE TABLE test1;"); @@ -451,14 +501,19 @@ foreach my $filename (@tempRelationFiles) } $node->command_fails( - [ @pg_basebackup_defs, '-D', "$tempdir/backup1", '-Fp' ], + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup1", + '--format' => 'plain' + ], 'plain format with tablespaces fails without tablespace mapping'); $node->command_ok( [ - @pg_basebackup_defs, '-D', - "$tempdir/backup1", '-Fp', - "-T$realTsDir=$tempdir/tbackup/tblspc1", + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup1", + '--format' => 'plain', + '--tablespace-mapping' => "$realTsDir=$tempdir/tbackup/tblspc1", ], 'plain format with tablespaces succeeds with tablespace mapping'); ok(-d "$tempdir/tbackup/tblspc1", 'tablespace was relocated'); @@ -526,9 +581,10 @@ $node->safe_psql('postgres', $realTsDir =~ s/=/\\=/; $node->command_ok( [ - @pg_basebackup_defs, '-D', - "$tempdir/backup3", '-Fp', - "-T$realTsDir=$tempdir/tbackup/tbl\\=spc2", + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup3", + '--format' => 'plain', + '--tablespace-mapping' => "$realTsDir=$tempdir/tbackup/tbl\\=spc2", ], 'mapping tablespace with = sign in path'); ok(-d "$tempdir/tbackup/tbl=spc2", 'tablespace with = sign was relocated'); @@ -540,13 +596,22 @@ $realTsDir = 
"$real_sys_tempdir/$superlongname"; $node->safe_psql('postgres', "CREATE TABLESPACE tblspc3 LOCATION '$realTsDir';"); $node->command_ok( - [ @pg_basebackup_defs, '-D', "$tempdir/tarbackup_l3", '-Ft' ], + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/tarbackup_l3", + '--format' => 'tar' + ], 'pg_basebackup tar with long symlink target'); $node->safe_psql('postgres', "DROP TABLESPACE tblspc3;"); rmtree("$tempdir/tarbackup_l3"); -$node->command_ok([ @pg_basebackup_defs, '-D', "$tempdir/backupR", '-R' ], - 'pg_basebackup -R runs'); +$node->command_ok( + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupR", + '--write-recovery-conf' + ], + 'pg_basebackup --write-recovery-conf runs'); ok(-f "$tempdir/backupR/postgresql.auto.conf", 'postgresql.auto.conf exists'); ok(-f "$tempdir/backupR/standby.signal", 'standby.signal was created'); my $recovery_conf = slurp_file "$tempdir/backupR/postgresql.auto.conf"; @@ -558,76 +623,105 @@ like( qr/^primary_conninfo = '.*port=$port.*'\n/m, 'postgresql.auto.conf sets primary_conninfo'); -$node->command_ok( - [ @pg_basebackup_defs, '-D', "$tempdir/backupxd" ], +$node->command_ok([ @pg_basebackup_defs, '--pgdata' => "$tempdir/backupxd" ], 'pg_basebackup runs in default xlog mode'); ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxd/pg_wal")), 'WAL files copied'); rmtree("$tempdir/backupxd"); $node->command_ok( - [ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ], - 'pg_basebackup -X fetch runs'); + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxf", + '--wal-method' => 'fetch' + ], + 'pg_basebackup --wal-method fetch runs'); ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxf/pg_wal")), 'WAL files copied'); rmtree("$tempdir/backupxf"); $node->command_ok( - [ @pg_basebackup_defs, '-D', "$tempdir/backupxs", '-X', 'stream' ], - 'pg_basebackup -X stream runs'); + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs", + '--wal-method' => 'stream' + ], + 'pg_basebackup --wal-method stream 
runs'); ok(grep(/^[0-9A-F]{24}$/, slurp_dir("$tempdir/backupxs/pg_wal")), 'WAL files copied'); rmtree("$tempdir/backupxs"); $node->command_ok( [ - @pg_basebackup_defs, '-D', "$tempdir/backupxst", '-X', 'stream', - '-Ft' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxst", + '--wal-method' => 'stream', + '--format' => 'tar' ], - 'pg_basebackup -X stream runs in tar mode'); + 'pg_basebackup --wal-method stream runs in tar mode'); ok(-f "$tempdir/backupxst/pg_wal.tar", "tar file was created"); rmtree("$tempdir/backupxst"); $node->command_ok( [ - @pg_basebackup_defs, '-D', - "$tempdir/backupnoslot", '-X', - 'stream', '--no-slot' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupnoslot", + '--wal-method' => 'stream', + '--no-slot' ], - 'pg_basebackup -X stream runs with --no-slot'); + 'pg_basebackup --wal-method stream runs with --no-slot'); rmtree("$tempdir/backupnoslot"); $node->command_ok( - [ @pg_basebackup_defs, '-D', "$tempdir/backupxf", '-X', 'fetch' ], - 'pg_basebackup -X fetch runs'); + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxf", + '--wal-method' => 'fetch' + ], + 'pg_basebackup --wal-method fetch runs'); $node->command_fails_like( - [ @pg_basebackup_defs, '--target', 'blackhole' ], + [ @pg_basebackup_defs, '--target' => 'blackhole' ], qr/WAL cannot be streamed when a backup target is specified/, - 'backup target requires -X'); + 'backup target requires --wal-method'); $node->command_fails_like( - [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'stream' ], + [ + @pg_basebackup_defs, + '--target' => 'blackhole', + '--wal-method' => 'stream' + ], qr/WAL cannot be streamed when a backup target is specified/, - 'backup target requires -X other than -X stream'); + 'backup target requires --wal-method other than --wal-method stream'); $node->command_fails_like( - [ @pg_basebackup_defs, '--target', 'bogus', '-X', 'none' ], + [ @pg_basebackup_defs, '--target' => 'bogus', '--wal-method' => 'none' ], qr/unrecognized target/, 'backup 
target unrecognized'); $node->command_fails_like( [ - @pg_basebackup_defs, '--target', 'blackhole', '-X', - 'none', '-D', "$tempdir/blackhole" + @pg_basebackup_defs, + '--target' => 'blackhole', + '--wal-method' => 'none', + '--pgdata' => "$tempdir/blackhole" ], qr/cannot specify both output directory and backup target/, 'backup target and output directory'); $node->command_fails_like( - [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none', '-Ft' ], + [ + @pg_basebackup_defs, + '--target' => 'blackhole', + '--wal-method' => 'none', + '--format' => 'tar' + ], qr/cannot specify both format and backup target/, 'backup target and output directory'); $node->command_ok( - [ @pg_basebackup_defs, '--target', 'blackhole', '-X', 'none' ], + [ + @pg_basebackup_defs, + '--target' => 'blackhole', + '--wal-method' => 'none' + ], 'backup target blackhole'); $node->command_ok( [ - @pg_basebackup_defs, '--target', - "server:$tempdir/backuponserver", '-X', - 'none' + @pg_basebackup_defs, + '--target' => "server:$tempdir/backuponserver", + '--wal-method' => 'none' ], 'backup target server'); ok(-f "$tempdir/backuponserver/base.tar", 'backup tar was created'); @@ -638,9 +732,10 @@ $node->command_ok( 'create backup user'); $node->command_ok( [ - @pg_basebackup_defs, '-U', 'backupuser', '--target', - "server:$tempdir/backuponserver", - '-X', 'none' + @pg_basebackup_defs, + '--username' => 'backupuser', + '--target' => "server:$tempdir/backuponserver", + '--wal-method' => 'none' ], 'backup target server'); ok( -f "$tempdir/backuponserver/base.tar", @@ -649,66 +744,82 @@ rmtree("$tempdir/backuponserver"); $node->command_fails( [ - @pg_basebackup_defs, '-D', - "$tempdir/backupxs_sl_fail", '-X', - 'stream', '-S', - 'slot0' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs_sl_fail", + '--wal-method' => 'stream', + '--slot' => 'slot0' ], 'pg_basebackup fails with nonexistent replication slot'); $node->command_fails( - [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' 
], - 'pg_basebackup -C fails without slot name'); + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs_slot", + '--create-slot' + ], + 'pg_basebackup --create-slot fails without slot name'); $node->command_fails( [ - @pg_basebackup_defs, '-D', - "$tempdir/backupxs_slot", '-C', - '-S', 'slot0', + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs_slot", + '--create-slot', + '--slot' => 'slot0', '--no-slot' ], - 'pg_basebackup fails with -C -S --no-slot'); + 'pg_basebackup fails with --create-slot --slot --no-slot'); $node->command_fails_like( [ - @pg_basebackup_defs, '--target', 'blackhole', '-D', - "$tempdir/blackhole" + @pg_basebackup_defs, + '--target' => 'blackhole', + '--pgdata' => "$tempdir/blackhole" ], qr/cannot specify both output directory and backup target/, 'backup target and output directory'); $node->command_ok( - [ @pg_basebackup_defs, '-D', "$tempdir/backuptr/co", '-X', 'none' ], - 'pg_basebackup -X fetch runs'); + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backuptr/co", + '--wal-method' => 'none' + ], + 'pg_basebackup --wal-method none runs'); $node->command_fails( [ - @pg_basebackup_defs, '-D', - "$tempdir/backupxs_sl_fail", '-X', - 'stream', '-S', - 'slot0' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs_sl_fail", + '--wal-method' => 'stream', + '--slot' => 'slot0' ], 'pg_basebackup fails with nonexistent replication slot'); $node->command_fails( - [ @pg_basebackup_defs, '-D', "$tempdir/backupxs_slot", '-C' ], - 'pg_basebackup -C fails without slot name'); + [ + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs_slot", + '--create-slot' + ], + 'pg_basebackup --create-slot fails without slot name'); $node->command_fails( [ - @pg_basebackup_defs, '-D', - "$tempdir/backupxs_slot", '-C', - '-S', 'slot0', + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs_slot", + '--create-slot', + '--slot' => 'slot0', '--no-slot' ], - 'pg_basebackup fails with -C -S --no-slot'); + 'pg_basebackup fails with --create-slot
--slot --no-slot'); $node->command_ok( [ - @pg_basebackup_defs, '-D', - "$tempdir/backupxs_slot", '-C', - '-S', 'slot0' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs_slot", + '--create-slot', + '--slot' => 'slot0' ], - 'pg_basebackup -C runs'); + 'pg_basebackup --create-slot runs'); rmtree("$tempdir/backupxs_slot"); is( $node->safe_psql( @@ -727,11 +838,13 @@ isnt( $node->command_fails( [ - @pg_basebackup_defs, '-D', - "$tempdir/backupxs_slot1", '-C', - '-S', 'slot0' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs_slot1", + '--create-slot', + '--slot' => 'slot0' ], - 'pg_basebackup fails with -C -S and a previously existing slot'); + 'pg_basebackup fails with --create-slot --slot and a previously existing slot' +); $node->safe_psql('postgres', q{SELECT * FROM pg_create_physical_replication_slot('slot1')}); @@ -741,16 +854,20 @@ my $lsn = $node->safe_psql('postgres', is($lsn, '', 'restart LSN of new slot is null'); $node->command_fails( [ - @pg_basebackup_defs, '-D', "$tempdir/fail", '-S', - 'slot1', '-X', 'none' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/fail", + '--slot' => 'slot1', + '--wal-method' => 'none' ], 'pg_basebackup with replication slot fails without WAL streaming'); $node->command_ok( [ - @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl", '-X', - 'stream', '-S', 'slot1' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs_sl", + '--wal-method' => 'stream', + '--slot' => 'slot1' ], - 'pg_basebackup -X stream with replication slot runs'); + 'pg_basebackup --wal-method stream with replication slot runs'); $lsn = $node->safe_psql('postgres', q{SELECT restart_lsn FROM pg_replication_slots WHERE slot_name = 'slot1'} ); @@ -759,10 +876,13 @@ rmtree("$tempdir/backupxs_sl"); $node->command_ok( [ - @pg_basebackup_defs, '-D', "$tempdir/backupxs_sl_R", '-X', - 'stream', '-S', 'slot1', '-R', + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backupxs_sl_R", + '--wal-method' => 'stream', + '--slot' => 'slot1', + 
'--write-recovery-conf', ], - 'pg_basebackup with replication slot and -R runs'); + 'pg_basebackup with replication slot and --write-recovery-conf runs'); like( slurp_file("$tempdir/backupxs_sl_R/postgresql.auto.conf"), qr/^primary_slot_name = 'slot1'\n/m, @@ -774,10 +894,13 @@ rmtree("$tempdir/backupxs_sl_R"); $node->command_ok( [ - @pg_basebackup_defs, '-D', "$tempdir/backup_dbname_R", '-X', - 'stream', '-d', "dbname=db1", '-R', + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_dbname_R", + '--wal-method' => 'stream', + '--dbname' => "dbname=db1", + '--write-recovery-conf', ], - 'pg_basebackup with dbname and -R runs'); + 'pg_basebackup with dbname and --write-recovery-conf runs'); like(slurp_file("$tempdir/backup_dbname_R/postgresql.auto.conf"), qr/dbname=db1/m, 'recovery conf file sets dbname'); @@ -800,7 +923,7 @@ $node->corrupt_page_checksum($file_corrupt1, 0); $node->start; $node->command_checks_all( - [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt" ], + [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt" ], 1, [qr{^$}], [qr/^WARNING.*checksum verification failed/s], @@ -816,7 +939,7 @@ for my $i (1 .. 
5) $node->start; $node->command_checks_all( - [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt2" ], + [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt2" ], 1, [qr{^$}], [qr/^WARNING.*further.*failures.*will.not.be.reported/s], @@ -829,7 +952,7 @@ $node->corrupt_page_checksum($file_corrupt2, 0); $node->start; $node->command_checks_all( - [ @pg_basebackup_defs, '-D', "$tempdir/backup_corrupt3" ], + [ @pg_basebackup_defs, '--pgdata' => "$tempdir/backup_corrupt3" ], 1, [qr{^$}], [qr/^WARNING.*7 total checksum verification failures/s], @@ -839,8 +962,9 @@ rmtree("$tempdir/backup_corrupt3"); # do not verify checksums, should return ok $node->command_ok( [ - @pg_basebackup_defs, '-D', - "$tempdir/backup_corrupt4", '--no-verify-checksums', + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_corrupt4", + '--no-verify-checksums', ], 'pg_basebackup with -k does not report checksum mismatch'); rmtree("$tempdir/backup_corrupt4"); @@ -858,25 +982,26 @@ SKIP: $node->command_ok( [ - @pg_basebackup_defs, '-D', - "$tempdir/backup_gzip", '--compress', - '1', '--format', - 't' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_gzip", + '--compress' => '1', + '--format' => 't' ], 'pg_basebackup with --compress'); $node->command_ok( [ - @pg_basebackup_defs, '-D', - "$tempdir/backup_gzip2", '--gzip', - '--format', 't' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_gzip2", + '--gzip', + '--format' => 't' ], 'pg_basebackup with --gzip'); $node->command_ok( [ - @pg_basebackup_defs, '-D', - "$tempdir/backup_gzip3", '--compress', - 'gzip:1', '--format', - 't' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/backup_gzip3", + '--compress' => 'gzip:1', + '--format' => 't' ], 'pg_basebackup with --compress=gzip:1'); @@ -921,16 +1046,13 @@ my ($sigchld_bb_stdin, $sigchld_bb_stdout, $sigchld_bb_stderr) = ('', '', ''); my $sigchld_bb = IPC::Run::start( [ @pg_basebackup_defs, '--wal-method=stream', - '-D', "$tempdir/sigchld", - '--max-rate=32', '-d', - 
$node->connstr('postgres') + '--pgdata' => "$tempdir/sigchld", + '--max-rate' => '32', + '--dbname' => $node->connstr('postgres') ], - '<', - \$sigchld_bb_stdin, - '>', - \$sigchld_bb_stdout, - '2>', - \$sigchld_bb_stderr, + '<' => \$sigchld_bb_stdin, + '>' => \$sigchld_bb_stdout, + '2>' => \$sigchld_bb_stderr, $sigchld_bb_timeout); is( $node->poll_query_until( @@ -977,9 +1099,9 @@ $node2->start; $node2->command_fails_like( [ - @pg_basebackup_defs, '-D', - "$tempdir" . '/diff_sysid', '--incremental', - "$backupdir" . '/backup_manifest' + @pg_basebackup_defs, + '--pgdata' => "$tempdir/diff_sysid", + '--incremental' => "$backupdir/backup_manifest", ], qr/system identifier in backup manifest is .*, but database system identifier is/, "pg_basebackup fails with different database system manifest"); diff --git a/src/bin/pg_basebackup/t/011_in_place_tablespace.pl b/src/bin/pg_basebackup/t/011_in_place_tablespace.pl index 1e3c0002ec5..9e53dada4fa 100644 --- a/src/bin/pg_basebackup/t/011_in_place_tablespace.pl +++ b/src/bin/pg_basebackup/t/011_in_place_tablespace.pl @@ -12,7 +12,8 @@ my $tempdir = PostgreSQL::Test::Utils::tempdir; # to keep test times reasonable. Using @pg_basebackup_defs as the first # element of the array passed to IPC::Run interpolate the array (as it is # not a reference to an array)... -my @pg_basebackup_defs = ('pg_basebackup', '--no-sync', '-cfast'); +my @pg_basebackup_defs = + ('pg_basebackup', '--no-sync', '--checkpoint' => 'fast'); # Set up an instance. my $node = PostgreSQL::Test::Cluster->new('main'); @@ -28,7 +29,12 @@ EOM # Back it up. my $backupdir = $tempdir . '/backup'; $node->command_ok( - [ @pg_basebackup_defs, '-D', $backupdir, '-Ft', '-X', 'none' ], + [ + @pg_basebackup_defs, + '--pgdata' => $backupdir, + '--format' => 'tar', + '--wal-method' => 'none' + ], 'pg_basebackup runs'); # Make sure we got base.tar and one tablespace. 
diff --git a/src/bin/pg_basebackup/t/020_pg_receivewal.pl b/src/bin/pg_basebackup/t/020_pg_receivewal.pl index 7e3225dcd9a..4be96affd7b 100644 --- a/src/bin/pg_basebackup/t/020_pg_receivewal.pl +++ b/src/bin/pg_basebackup/t/020_pg_receivewal.pl @@ -25,28 +25,43 @@ mkdir($stream_dir); $primary->command_fails(['pg_receivewal'], 'pg_receivewal needs target directory specified'); $primary->command_fails( - [ 'pg_receivewal', '-D', $stream_dir, '--create-slot', '--drop-slot' ], + [ + 'pg_receivewal', + '--directory' => $stream_dir, + '--create-slot', + '--drop-slot', + ], 'failure if both --create-slot and --drop-slot specified'); $primary->command_fails( - [ 'pg_receivewal', '-D', $stream_dir, '--create-slot' ], + [ 'pg_receivewal', '--directory' => $stream_dir, '--create-slot' ], 'failure if --create-slot specified without --slot'); $primary->command_fails( - [ 'pg_receivewal', '-D', $stream_dir, '--synchronous', '--no-sync' ], + [ + 'pg_receivewal', + '--directory' => $stream_dir, + '--synchronous', + '--no-sync', + ], 'failure if --synchronous specified with --no-sync'); $primary->command_fails_like( - [ 'pg_receivewal', '-D', $stream_dir, '--compress', 'none:1', ], + [ + 'pg_receivewal', + '--directory' => $stream_dir, + '--compress' => 'none:1', + ], qr/\Qpg_receivewal: error: invalid compression specification: compression algorithm "none" does not accept a compression level/, 'failure if --compress none:N (where N > 0)'); # Slot creation and drop my $slot_name = 'test'; $primary->command_ok( - [ 'pg_receivewal', '--slot', $slot_name, '--create-slot' ], + [ 'pg_receivewal', '--slot' => $slot_name, '--create-slot' ], 'creating a replication slot'); my $slot = $primary->slot($slot_name); is($slot->{'slot_type'}, 'physical', 'physical replication slot was created'); is($slot->{'restart_lsn'}, '', 'restart LSN of new slot is null'); -$primary->command_ok([ 'pg_receivewal', '--slot', $slot_name, '--drop-slot' ], +$primary->command_ok( + [ 'pg_receivewal', '--slot' => 
$slot_name, '--drop-slot' ], 'dropping a replication slot'); is($primary->slot($slot_name)->{'slot_type'}, '', 'replication slot was removed'); @@ -66,8 +81,12 @@ $primary->psql('postgres', 'INSERT INTO test_table VALUES (1);'); # compression involved. $primary->command_ok( [ - 'pg_receivewal', '-D', $stream_dir, '--verbose', - '--endpos', $nextlsn, '--synchronous', '--no-loop' + 'pg_receivewal', + '--directory' => $stream_dir, + '--verbose', + '--endpos' => $nextlsn, + '--synchronous', + '--no-loop', ], 'streaming some WAL with --synchronous'); @@ -92,8 +111,11 @@ SKIP: $primary->command_ok( [ - 'pg_receivewal', '-D', $stream_dir, '--verbose', - '--endpos', $nextlsn, '--compress', 'gzip:1', + 'pg_receivewal', + '--directory' => $stream_dir, + '--verbose', + '--endpos' => $nextlsn, + '--compress' => 'gzip:1', '--no-loop' ], "streaming some WAL using ZLIB compression"); @@ -145,9 +167,12 @@ SKIP: # Stream up to the given position. $primary->command_ok( [ - 'pg_receivewal', '-D', $stream_dir, '--verbose', - '--endpos', $nextlsn, '--no-loop', '--compress', - 'lz4' + 'pg_receivewal', + '--directory' => $stream_dir, + '--verbose', + '--endpos' => $nextlsn, + '--no-loop', + '--compress' => 'lz4' ], 'streaming some WAL using --compress=lz4'); @@ -191,8 +216,11 @@ chomp($nextlsn); $primary->psql('postgres', 'INSERT INTO test_table VALUES (4);'); $primary->command_ok( [ - 'pg_receivewal', '-D', $stream_dir, '--verbose', - '--endpos', $nextlsn, '--no-loop' + 'pg_receivewal', + '--directory' => $stream_dir, + '--verbose', + '--endpos' => $nextlsn, + '--no-loop' ], "streaming some WAL"); @@ -247,17 +275,25 @@ $primary->psql('postgres', 'INSERT INTO test_table VALUES (6);'); # Check case where the slot does not exist. 
$primary->command_fails_like( [ - 'pg_receivewal', '-D', $slot_dir, '--slot', - 'nonexistentslot', '-n', '--no-sync', '--verbose', - '--endpos', $nextlsn + 'pg_receivewal', + '--directory' => $slot_dir, + '--slot' => 'nonexistentslot', + '--no-loop', + '--no-sync', + '--verbose', + '--endpos' => $nextlsn ], qr/pg_receivewal: error: replication slot "nonexistentslot" does not exist/, 'pg_receivewal fails with non-existing slot'); $primary->command_ok( [ - 'pg_receivewal', '-D', $slot_dir, '--slot', - $slot_name, '-n', '--no-sync', '--verbose', - '--endpos', $nextlsn + 'pg_receivewal', + '--directory' => $slot_dir, + '--slot' => $slot_name, + '--no-loop', + '--no-sync', + '--verbose', + '--endpos' => $nextlsn ], "WAL streamed from the slot's restart_lsn"); ok(-e "$slot_dir/$walfile_streamed", @@ -311,9 +347,13 @@ mkdir($timeline_dir); $standby->command_ok( [ - 'pg_receivewal', '-D', $timeline_dir, '--verbose', - '--endpos', $nextlsn, '--slot', $archive_slot, - '--no-sync', '-n' + 'pg_receivewal', + '--directory' => $timeline_dir, + '--verbose', + '--endpos' => $nextlsn, + '--slot' => $archive_slot, + '--no-sync', + '--no-loop' ], "Stream some wal after promoting, resuming from the slot's position"); ok(-e "$timeline_dir/$walfile_before_promotion", diff --git a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl index 315b6423b94..a6e10600161 100644 --- a/src/bin/pg_basebackup/t/030_pg_recvlogical.pl +++ b/src/bin/pg_basebackup/t/030_pg_recvlogical.pl @@ -28,23 +28,27 @@ $node->dump_info; $node->start; $node->command_fails(['pg_recvlogical'], 'pg_recvlogical needs a slot name'); -$node->command_fails([ 'pg_recvlogical', '-S', 'test' ], +$node->command_fails( + [ 'pg_recvlogical', '--slot' => 'test' ], 'pg_recvlogical needs a database'); -$node->command_fails([ 'pg_recvlogical', '-S', 'test', '-d', 'postgres' ], +$node->command_fails( + [ 'pg_recvlogical', '--slot' => 'test', '--dbname' => 'postgres' ], 'pg_recvlogical needs an 
action'); $node->command_fails( [ - 'pg_recvlogical', '-S', - 'test', '-d', - $node->connstr('postgres'), '--start' + 'pg_recvlogical', + '--slot' => 'test', + '--dbname' => $node->connstr('postgres'), + '--start', ], 'no destination file'); $node->command_ok( [ - 'pg_recvlogical', '-S', - 'test', '-d', - $node->connstr('postgres'), '--create-slot' + 'pg_recvlogical', + '--slot' => 'test', + '--dbname' => $node->connstr('postgres'), + '--create-slot', ], 'slot created'); @@ -60,26 +64,33 @@ chomp($nextlsn); $node->command_ok( [ - 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'), - '--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-' + 'pg_recvlogical', + '--slot' => 'test', + '--dbname' => $node->connstr('postgres'), + '--start', + '--endpos' => $nextlsn, + '--no-loop', + '--file' => '-', ], 'replayed a transaction'); $node->command_ok( [ - 'pg_recvlogical', '-S', - 'test', '-d', - $node->connstr('postgres'), '--drop-slot' + 'pg_recvlogical', + '--slot' => 'test', + '--dbname' => $node->connstr('postgres'), + '--drop-slot' ], 'slot dropped'); #test with two-phase option enabled $node->command_ok( [ - 'pg_recvlogical', '-S', - 'test', '-d', - $node->connstr('postgres'), '--create-slot', - '--two-phase' + 'pg_recvlogical', + '--slot' => 'test', + '--dbname' => $node->connstr('postgres'), + '--create-slot', + '--two-phase', ], 'slot with two-phase created'); @@ -94,19 +105,25 @@ chomp($nextlsn); $node->command_fails( [ - 'pg_recvlogical', '-S', - 'test', '-d', - $node->connstr('postgres'), '--start', - '--endpos', "$nextlsn", + 'pg_recvlogical', + '--slot' => 'test', + '--dbname' => $node->connstr('postgres'), + '--start', + '--endpos' => $nextlsn, '--two-phase', '--no-loop', - '-f', '-' + '--file' => '-', ], 'incorrect usage'); $node->command_ok( [ - 'pg_recvlogical', '-S', 'test', '-d', $node->connstr('postgres'), - '--start', '--endpos', "$nextlsn", '--no-loop', '-f', '-' + 'pg_recvlogical', + '--slot' => 'test', + '--dbname' => 
$node->connstr('postgres'), + '--start', + '--endpos' => $nextlsn, + '--no-loop', + '--file' => '-', ], 'replayed a two-phase transaction'); diff --git a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl index 5426159fa5a..c8dbdb7e9b7 100644 --- a/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl +++ b/src/bin/pg_basebackup/t/040_pg_createsubscriber.pl @@ -46,69 +46,75 @@ sub generate_db command_fails(['pg_createsubscriber'], 'no subscriber data directory specified'); command_fails( - [ 'pg_createsubscriber', '--pgdata', $datadir ], + [ 'pg_createsubscriber', '--pgdata' => $datadir ], 'no publisher connection string specified'); command_fails( [ - 'pg_createsubscriber', '--verbose', - '--pgdata', $datadir, - '--publisher-server', 'port=5432' + 'pg_createsubscriber', + '--verbose', + '--pgdata' => $datadir, + '--publisher-server' => 'port=5432', ], 'no database name specified'); command_fails( [ - 'pg_createsubscriber', '--verbose', - '--pgdata', $datadir, - '--publisher-server', 'port=5432', - '--database', 'pg1', - '--database', 'pg1' + 'pg_createsubscriber', + '--verbose', + '--pgdata' => $datadir, + '--publisher-server' => 'port=5432', + '--database' => 'pg1', + '--database' => 'pg1', ], 'duplicate database name'); command_fails( [ - 'pg_createsubscriber', '--verbose', - '--pgdata', $datadir, - '--publisher-server', 'port=5432', - '--publication', 'foo1', - '--publication', 'foo1', - '--database', 'pg1', - '--database', 'pg2' + 'pg_createsubscriber', + '--verbose', + '--pgdata' => $datadir, + '--publisher-server' => 'port=5432', + '--publication' => 'foo1', + '--publication' => 'foo1', + '--database' => 'pg1', + '--database' => 'pg2', ], 'duplicate publication name'); command_fails( [ - 'pg_createsubscriber', '--verbose', - '--pgdata', $datadir, - '--publisher-server', 'port=5432', - '--publication', 'foo1', - '--database', 'pg1', - '--database', 'pg2' + 'pg_createsubscriber', + '--verbose', + '--pgdata' => 
$datadir, + '--publisher-server' => 'port=5432', + '--publication' => 'foo1', + '--database' => 'pg1', + '--database' => 'pg2', ], 'wrong number of publication names'); command_fails( [ - 'pg_createsubscriber', '--verbose', - '--pgdata', $datadir, - '--publisher-server', 'port=5432', - '--publication', 'foo1', - '--publication', 'foo2', - '--subscription', 'bar1', - '--database', 'pg1', - '--database', 'pg2' + 'pg_createsubscriber', + '--verbose', + '--pgdata' => $datadir, + '--publisher-server' => 'port=5432', + '--publication' => 'foo1', + '--publication' => 'foo2', + '--subscription' => 'bar1', + '--database' => 'pg1', + '--database' => 'pg2', ], 'wrong number of subscription names'); command_fails( [ - 'pg_createsubscriber', '--verbose', - '--pgdata', $datadir, - '--publisher-server', 'port=5432', - '--publication', 'foo1', - '--publication', 'foo2', - '--subscription', 'bar1', - '--subscription', 'bar2', - '--replication-slot', 'baz1', - '--database', 'pg1', - '--database', 'pg2' + 'pg_createsubscriber', + '--verbose', + '--pgdata' => $datadir, + '--publisher-server' => 'port=5432', + '--publication' => 'foo1', + '--publication' => 'foo2', + '--subscription' => 'bar1', + '--subscription' => 'bar2', + '--replication-slot' => 'baz1', + '--database' => 'pg1', + '--database' => 'pg2', ], 'wrong number of replication slot names'); @@ -168,41 +174,44 @@ $node_t->stop; # Run pg_createsubscriber on a promoted server command_fails( [ - 'pg_createsubscriber', '--verbose', - '--dry-run', '--pgdata', - $node_t->data_dir, '--publisher-server', - $node_p->connstr($db1), '--socketdir', - $node_t->host, '--subscriber-port', - $node_t->port, '--database', - $db1, '--database', - $db2 + 'pg_createsubscriber', + '--verbose', + '--dry-run', + '--pgdata' => $node_t->data_dir, + '--publisher-server' => $node_p->connstr($db1), + '--socketdir' => $node_t->host, + '--subscriber-port' => $node_t->port, + '--database' => $db1, + '--database' => $db2, ], 'target server is not in 
recovery'); # Run pg_createsubscriber when standby is running command_fails( [ - 'pg_createsubscriber', '--verbose', - '--dry-run', '--pgdata', - $node_s->data_dir, '--publisher-server', - $node_p->connstr($db1), '--socketdir', - $node_s->host, '--subscriber-port', - $node_s->port, '--database', - $db1, '--database', - $db2 + 'pg_createsubscriber', + '--verbose', + '--dry-run', + '--pgdata' => $node_s->data_dir, + '--publisher-server' => $node_p->connstr($db1), + '--socketdir' => $node_s->host, + '--subscriber-port' => $node_s->port, + '--database' => $db1, + '--database' => $db2, ], 'standby is up and running'); # Run pg_createsubscriber on about-to-fail node F command_fails( [ - 'pg_createsubscriber', '--verbose', - '--pgdata', $node_f->data_dir, - '--publisher-server', $node_p->connstr($db1), - '--socketdir', $node_f->host, - '--subscriber-port', $node_f->port, - '--database', $db1, - '--database', $db2 + 'pg_createsubscriber', + '--verbose', + '--pgdata' => $node_f->data_dir, + '--publisher-server' => $node_p->connstr($db1), + '--socketdir' => $node_f->host, + '--subscriber-port' => $node_f->port, + '--database' => $db1, + '--database' => $db2 ], 'subscriber data directory is not a copy of the source database cluster'); @@ -216,14 +225,15 @@ $node_c->set_standby_mode(); # Run pg_createsubscriber on node C (P -> S -> C) command_fails( [ - 'pg_createsubscriber', '--verbose', - '--dry-run', '--pgdata', - $node_c->data_dir, '--publisher-server', - $node_s->connstr($db1), '--socketdir', - $node_c->host, '--subscriber-port', - $node_c->port, '--database', - $db1, '--database', - $db2 + 'pg_createsubscriber', + '--verbose', + '--dry-run', + '--pgdata' => $node_c->data_dir, + '--publisher-server' => $node_s->connstr($db1), + '--socketdir' => $node_c->host, + '--subscriber-port' => $node_c->port, + '--database' => $db1, + '--database' => $db2, ], 'primary server is in recovery'); @@ -239,14 +249,16 @@ $node_p->restart; $node_s->stop; command_fails( [ - 
'pg_createsubscriber', '--verbose', - '--dry-run', '--pgdata', - $node_s->data_dir, '--publisher-server', - $node_p->connstr($db1), '--socketdir', - $node_s->host, '--subscriber-port', - $node_s->port, '--database', - $db1, '--database', - $db2 + 'pg_createsubscriber', + '--verbose', + '--dry-run', + '--pgdata' => $node_s->data_dir, + '--publisher-server' => $node_p->connstr($db1), + '--socketdir' => $node_s->host, + '--subscriber-port' => $node_s->port, + '--database' => $db1, + '--database' => $db2, + ], 'primary contains unmet conditions on node P'); # Restore default settings here but only apply it after testing standby. Some @@ -268,14 +280,15 @@ max_worker_processes = 2 }); command_fails( [ - 'pg_createsubscriber', '--verbose', - '--dry-run', '--pgdata', - $node_s->data_dir, '--publisher-server', - $node_p->connstr($db1), '--socketdir', - $node_s->host, '--subscriber-port', - $node_s->port, '--database', - $db1, '--database', - $db2 + 'pg_createsubscriber', + '--verbose', + '--dry-run', + '--pgdata' => $node_s->data_dir, + '--publisher-server' => $node_p->connstr($db1), + '--socketdir' => $node_s->host, + '--subscriber-port' => $node_s->port, + '--database' => $db1, + '--database' => $db2, ], 'standby contains unmet conditions on node S'); $node_s->append_conf( @@ -321,19 +334,20 @@ $node_s->stop; # dry run mode on node S command_ok( [ - 'pg_createsubscriber', '--verbose', - '--recovery-timeout', "$PostgreSQL::Test::Utils::timeout_default", - '--dry-run', '--pgdata', - $node_s->data_dir, '--publisher-server', - $node_p->connstr($db1), '--socketdir', - $node_s->host, '--subscriber-port', - $node_s->port, '--publication', - 'pub1', '--publication', - 'pub2', '--subscription', - 'sub1', '--subscription', - 'sub2', '--database', - $db1, '--database', - $db2 + 'pg_createsubscriber', + '--verbose', + '--dry-run', + '--recovery-timeout' => $PostgreSQL::Test::Utils::timeout_default, + '--pgdata' => $node_s->data_dir, + '--publisher-server' => $node_p->connstr($db1), 
+ '--socketdir' => $node_s->host, + '--subscriber-port' => $node_s->port, + '--publication' => 'pub1', + '--publication' => 'pub2', + '--subscription' => 'sub1', + '--subscription' => 'sub2', + '--database' => $db1, + '--database' => $db2, ], 'run pg_createsubscriber --dry-run on node S'); @@ -346,32 +360,34 @@ $node_s->stop; # pg_createsubscriber can run without --databases option command_ok( [ - 'pg_createsubscriber', '--verbose', - '--dry-run', '--pgdata', - $node_s->data_dir, '--publisher-server', - $node_p->connstr($db1), '--socketdir', - $node_s->host, '--subscriber-port', - $node_s->port, '--replication-slot', - 'replslot1' + 'pg_createsubscriber', + '--verbose', + '--dry-run', + '--pgdata' => $node_s->data_dir, + '--publisher-server' => $node_p->connstr($db1), + '--socketdir' => $node_s->host, + '--subscriber-port' => $node_s->port, + '--replication-slot' => 'replslot1', ], 'run pg_createsubscriber without --databases'); -# Run pg_createsubscriber on node S +# Run pg_createsubscriber on node S. --verbose is used twice +# to show more information. 
command_ok( [ - 'pg_createsubscriber', '--verbose', - '--recovery-timeout', "$PostgreSQL::Test::Utils::timeout_default", - '--verbose', '--pgdata', - $node_s->data_dir, '--publisher-server', - $node_p->connstr($db1), '--socketdir', - $node_s->host, '--subscriber-port', - $node_s->port, '--publication', - 'pub1', '--publication', - 'Pub2', '--replication-slot', - 'replslot1', '--replication-slot', - 'replslot2', '--database', - $db1, '--database', - $db2 + 'pg_createsubscriber', + '--verbose', '--verbose', + '--recovery-timeout' => $PostgreSQL::Test::Utils::timeout_default, + '--pgdata' => $node_s->data_dir, + '--publisher-server' => $node_p->connstr($db1), + '--socketdir' => $node_s->host, + '--subscriber-port' => $node_s->port, + '--publication' => 'pub1', + '--publication' => 'Pub2', + '--replication-slot' => 'replslot1', + '--replication-slot' => 'replslot2', + '--database' => $db1, + '--database' => $db2, ], 'run pg_createsubscriber on node S'); diff --git a/src/bin/pg_checksums/t/002_actions.pl b/src/bin/pg_checksums/t/002_actions.pl index f81108e86c4..339c1537a46 100644 --- a/src/bin/pg_checksums/t/002_actions.pl +++ b/src/bin/pg_checksums/t/002_actions.pl @@ -44,9 +44,10 @@ sub check_relation_corruption # corrupted yet.
command_ok( [ - 'pg_checksums', '--check', - '-D', $pgdata, - '--filenode', $relfilenode_corrupted + 'pg_checksums', + '--check', + '--pgdata' => $pgdata, + '--filenode' => $relfilenode_corrupted, ], "succeeds for single relfilenode on tablespace $tablespace with offline cluster" ); @@ -57,9 +58,10 @@ sub check_relation_corruption # Checksum checks on single relfilenode fail $node->command_checks_all( [ - 'pg_checksums', '--check', - '-D', $pgdata, - '--filenode', $relfilenode_corrupted + 'pg_checksums', + '--check', + '--pgdata' => $pgdata, + '--filenode' => $relfilenode_corrupted, ], 1, [qr/Bad checksums:.*1/], @@ -69,7 +71,7 @@ sub check_relation_corruption # Global checksum checks fail as well $node->command_checks_all( - [ 'pg_checksums', '--check', '-D', $pgdata ], + [ 'pg_checksums', '--check', '--pgdata' => $pgdata ], 1, [qr/Bad checksums:.*1/], [qr/checksum verification failed/], @@ -79,7 +81,8 @@ sub check_relation_corruption $node->start; $node->safe_psql('postgres', "DROP TABLE $table;"); $node->stop; - $node->command_ok([ 'pg_checksums', '--check', '-D', $pgdata ], + $node->command_ok( + [ 'pg_checksums', '--check', '--pgdata' => $pgdata ], "succeeds again after table drop on tablespace $tablespace"); $node->start; @@ -122,11 +125,12 @@ append_to_file "$pgdata/global/.DS_Store", "foo" unless ($Config{osname} eq 'darwin'); # Enable checksums. -command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ], +command_ok([ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ], "checksums successfully enabled in cluster"); # Successive attempt to enable checksums fails. -command_fails([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ], +command_fails( + [ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ], "enabling checksums fails if already enabled"); # Control file should know that checksums are enabled. @@ -137,12 +141,12 @@ command_like( # Disable checksums again. Flush result here as that should be cheap. 
command_ok( - [ 'pg_checksums', '--disable', '-D', $pgdata ], + [ 'pg_checksums', '--disable', '--pgdata' => $pgdata ], "checksums successfully disabled in cluster"); # Successive attempt to disable checksums fails. command_fails( - [ 'pg_checksums', '--disable', '--no-sync', '-D', $pgdata ], + [ 'pg_checksums', '--disable', '--no-sync', '--pgdata' => $pgdata ], "disabling checksums fails if already disabled"); # Control file should know that checksums are disabled. @@ -152,7 +156,7 @@ command_like( 'checksums disabled in control file'); # Enable checksums again for follow-up tests. -command_ok([ 'pg_checksums', '--enable', '--no-sync', '-D', $pgdata ], +command_ok([ 'pg_checksums', '--enable', '--no-sync', '--pgdata' => $pgdata ], "checksums successfully enabled in cluster"); # Control file should know that checksums are enabled. @@ -162,21 +166,31 @@ command_like( 'checksums enabled in control file'); # Checksums pass on a newly-created cluster -command_ok([ 'pg_checksums', '--check', '-D', $pgdata ], +command_ok([ 'pg_checksums', '--check', '--pgdata' => $pgdata ], "succeeds with offline cluster"); # Checksums are verified if no other arguments are specified command_ok( - [ 'pg_checksums', '-D', $pgdata ], + [ 'pg_checksums', '--pgdata' => $pgdata ], "verifies checksums as default action"); # Specific relation files cannot be requested when action is --disable # or --enable. command_fails( - [ 'pg_checksums', '--disable', '--filenode', '1234', '-D', $pgdata ], + [ + 'pg_checksums', + '--disable', + '--filenode' => '1234', + '--pgdata' => $pgdata + ], "fails when relfilenodes are requested and action is --disable"); command_fails( - [ 'pg_checksums', '--enable', '--filenode', '1234', '-D', $pgdata ], + [ + 'pg_checksums', + '--enable', + '--filenode' => '1234', + '--pgdata' => $pgdata + ], "fails when relfilenodes are requested and action is --enable"); # Test postgres -C for an offline cluster. @@ -187,8 +201,10 @@ command_fails( # account on Windows. 
command_checks_all( [ - 'pg_ctl', 'start', '-D', $pgdata, '-s', '-o', - '-C data_checksums -c log_min_messages=fatal' + 'pg_ctl', 'start', + '--silent', + '--pgdata' => $pgdata, + '-o' => '-C data_checksums -c log_min_messages=fatal', ], 1, [qr/^on$/], @@ -197,7 +213,7 @@ command_checks_all( # Checks cannot happen with an online cluster $node->start; -command_fails([ 'pg_checksums', '--check', '-D', $pgdata ], +command_fails([ 'pg_checksums', '--check', '--pgdata' => $pgdata ], "fails with online cluster"); # Check corruption of table on default tablespace. @@ -224,7 +240,7 @@ sub fail_corrupt append_to_file $file_name, "foo"; $node->command_checks_all( - [ 'pg_checksums', '--check', '-D', $pgdata ], + [ 'pg_checksums', '--check', '--pgdata' => $pgdata ], 1, [qr/^$/], [qr/could not read block 0 in file.*$file\":/], @@ -242,7 +258,7 @@ $node->stop; # when verifying checksums. mkdir "$tablespace_dir/PG_99_999999991/"; append_to_file "$tablespace_dir/PG_99_999999991/foo", "123"; -command_ok([ 'pg_checksums', '--check', '-D', $pgdata ], +command_ok([ 'pg_checksums', '--check', '--pgdata' => $pgdata ], "succeeds with foreign tablespace"); # Authorized relation files filled with corrupted data cause the diff --git a/src/bin/pg_combinebackup/t/002_compare_backups.pl b/src/bin/pg_combinebackup/t/002_compare_backups.pl index 767dd4832ba..ebd68bfb850 100644 --- a/src/bin/pg_combinebackup/t/002_compare_backups.pl +++ b/src/bin/pg_combinebackup/t/002_compare_backups.pl @@ -58,9 +58,11 @@ my $tsbackup1path = $tempdir . '/ts1backup'; mkdir($tsbackup1path) || die "mkdir $tsbackup1path: $!"; $primary->command_ok( [ - 'pg_basebackup', '-D', - $backup1path, '--no-sync', - '-cfast', "-T${tsprimary}=${tsbackup1path}" + 'pg_basebackup', + '--no-sync', + '--pgdata' => $backup1path, + '--checkpoint' => 'fast', + '--tablespace-mapping' => "${tsprimary}=${tsbackup1path}" ], "full backup"); @@ -89,10 +91,12 @@ my $tsbackup2path = $tempdir . 
'/tsbackup2'; mkdir($tsbackup2path) || die "mkdir $tsbackup2path: $!"; $primary->command_ok( [ - 'pg_basebackup', '-D', - $backup2path, '--no-sync', - '-cfast', "-T${tsprimary}=${tsbackup2path}", - '--incremental', $backup1path . '/backup_manifest' + 'pg_basebackup', + '--no-sync', + '--pgdata' => $backup2path, + '--checkpoint' => 'fast', + '--tablespace-mapping' => "${tsprimary}=${tsbackup2path}", + '--incremental' => $backup1path . '/backup_manifest' ], "incremental backup"); @@ -169,18 +173,20 @@ my $dump1 = $backupdir . '/pitr1.dump'; my $dump2 = $backupdir . '/pitr2.dump'; $pitr1->command_ok( [ - 'pg_dumpall', '-f', - $dump1, '--no-sync', - '--no-unlogged-table-data', '-d', - $pitr1->connstr('postgres'), + 'pg_dumpall', + '--no-sync', + '--no-unlogged-table-data', + '--file' => $dump1, + '--dbname' => $pitr1->connstr('postgres'), ], 'dump from PITR 1'); $pitr2->command_ok( [ - 'pg_dumpall', '-f', - $dump2, '--no-sync', - '--no-unlogged-table-data', '-d', - $pitr2->connstr('postgres'), + 'pg_dumpall', + '--no-sync', + '--no-unlogged-table-data', + '--file' => $dump2, + '--dbname' => $pitr2->connstr('postgres'), ], 'dump from PITR 2'); diff --git a/src/bin/pg_combinebackup/t/003_timeline.pl b/src/bin/pg_combinebackup/t/003_timeline.pl index ad2dd04872e..0205a59f927 100644 --- a/src/bin/pg_combinebackup/t/003_timeline.pl +++ b/src/bin/pg_combinebackup/t/003_timeline.pl @@ -30,7 +30,12 @@ EOM # Take a full backup. my $backup1path = $node1->backup_dir . '/backup1'; $node1->command_ok( - [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $backup1path, + '--no-sync', + '--checkpoint' => 'fast' + ], "full backup from node1"); # Insert a second row on the original node. @@ -42,8 +47,11 @@ EOM my $backup2path = $node1->backup_dir . '/backup2'; $node1->command_ok( [ - 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast', - '--incremental', $backup1path . 
'/backup_manifest' + 'pg_basebackup', + '--pgdata' => $backup2path, + '--no-sync', + '--checkpoint' => 'fast', + '--incremental' => $backup1path . '/backup_manifest' ], "incremental backup from node1"); @@ -65,8 +73,11 @@ EOM my $backup3path = $node1->backup_dir . '/backup3'; $node2->command_ok( [ - 'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast', - '--incremental', $backup2path . '/backup_manifest' + 'pg_basebackup', + '--pgdata' => $backup3path, + '--no-sync', + '--checkpoint' => 'fast', + '--incremental' => $backup2path . '/backup_manifest' ], "incremental backup from node2"); diff --git a/src/bin/pg_combinebackup/t/004_manifest.pl b/src/bin/pg_combinebackup/t/004_manifest.pl index 2fe771f0e72..2a69d4d9b9c 100644 --- a/src/bin/pg_combinebackup/t/004_manifest.pl +++ b/src/bin/pg_combinebackup/t/004_manifest.pl @@ -25,7 +25,12 @@ $node->start; # Take a full backup. my $original_backup_path = $node->backup_dir . '/original'; $node->command_ok( - [ 'pg_basebackup', '-D', $original_backup_path, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $original_backup_path, + '--no-sync', + '--checkpoint' => 'fast', + ], "full backup"); # Verify the full backup. @@ -39,9 +44,11 @@ sub combine_and_test_one_backup my $revised_backup_path = $node->backup_dir . '/' . $backup_name; $node->command_ok( [ - 'pg_combinebackup', $original_backup_path, - '-o', $revised_backup_path, - '--no-sync', @extra_options + 'pg_combinebackup', + $original_backup_path, + '--output' => $revised_backup_path, + '--no-sync', + @extra_options, ], "pg_combinebackup with @extra_options"); if (defined $failure_pattern) diff --git a/src/bin/pg_combinebackup/t/005_integrity.pl b/src/bin/pg_combinebackup/t/005_integrity.pl index 61bb8275f0e..cfacf5ad7a0 100644 --- a/src/bin/pg_combinebackup/t/005_integrity.pl +++ b/src/bin/pg_combinebackup/t/005_integrity.pl @@ -43,15 +43,23 @@ $node2->start; # Take a full backup from node1. my $backup1path = $node1->backup_dir . 
'/backup1'; $node1->command_ok( - [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $backup1path, + '--no-sync', + '--checkpoint' => 'fast', + ], "full backup from node1"); # Now take an incremental backup. my $backup2path = $node1->backup_dir . '/backup2'; $node1->command_ok( [ - 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast', - '--incremental', $backup1path . '/backup_manifest' + 'pg_basebackup', + '--pgdata' => $backup2path, + '--no-sync', + '--checkpoint' => 'fast', + '--incremental' => $backup1path . '/backup_manifest', ], "incremental backup from node1"); @@ -59,23 +67,34 @@ $node1->command_ok( my $backup3path = $node1->backup_dir . '/backup3'; $node1->command_ok( [ - 'pg_basebackup', '-D', $backup3path, '--no-sync', '-cfast', - '--incremental', $backup2path . '/backup_manifest' + 'pg_basebackup', + '--pgdata' => $backup3path, + '--no-sync', + '--checkpoint' => 'fast', + '--incremental' => $backup2path . '/backup_manifest', ], "another incremental backup from node1"); # Take a full backup from node2. my $backupother1path = $node1->backup_dir . '/backupother1'; $node2->command_ok( - [ 'pg_basebackup', '-D', $backupother1path, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $backupother1path, + '--no-sync', + '--checkpoint' => 'fast', + ], "full backup from node2"); # Take an incremental backup from node2. my $backupother2path = $node1->backup_dir . '/backupother2'; $node2->command_ok( [ - 'pg_basebackup', '-D', $backupother2path, '--no-sync', '-cfast', - '--incremental', $backupother1path . '/backup_manifest' + 'pg_basebackup', + '--pgdata' => $backupother2path, + '--no-sync', + '--checkpoint' => 'fast', + '--incremental' => $backupother1path . '/backup_manifest', ], "incremental backup from node2"); @@ -85,8 +104,9 @@ my $resultpath = $node1->backup_dir . '/result'; # Can't combine 2 full backups. 
$node1->command_fails_like( [ - 'pg_combinebackup', $backup1path, $backup1path, '-o', - $resultpath, $mode + 'pg_combinebackup', $backup1path, $backup1path, + '--output' => $resultpath, + $mode, ], qr/is a full backup, but only the first backup should be a full backup/, "can't combine full backups"); @@ -94,8 +114,9 @@ $node1->command_fails_like( # Can't combine 2 incremental backups. $node1->command_fails_like( [ - 'pg_combinebackup', $backup2path, $backup2path, '-o', - $resultpath, $mode + 'pg_combinebackup', $backup2path, $backup2path, + '--output' => $resultpath, + $mode, ], qr/is an incremental backup, but the first backup should be a full backup/, "can't combine full backups"); @@ -103,8 +124,9 @@ $node1->command_fails_like( # Can't combine full backup with an incremental backup from a different system. $node1->command_fails_like( [ - 'pg_combinebackup', $backup1path, $backupother2path, '-o', - $resultpath, $mode + 'pg_combinebackup', $backup1path, $backupother2path, + '--output' => $resultpath, + $mode, ], qr/expected system identifier.*but found/, "can't combine backups from different nodes"); @@ -117,7 +139,8 @@ copy("$backupother2path/backup_manifest", "$backup2path/backup_manifest") $node1->command_fails_like( [ 'pg_combinebackup', $backup1path, $backup2path, $backup3path, - '-o', $resultpath, $mode + '--output' => $resultpath, + $mode, ], qr/ manifest system identifier is .*, but control file has /, "can't combine backups with different manifest system identifier "); @@ -128,8 +151,9 @@ move("$backup2path/backup_manifest.orig", "$backup2path/backup_manifest") # Can't omit a required backup. 
$node1->command_fails_like( [ - 'pg_combinebackup', $backup1path, $backup3path, '-o', - $resultpath, $mode + 'pg_combinebackup', $backup1path, $backup3path, + '--output' => $resultpath, + $mode, ], qr/starts at LSN.*but expected/, "can't omit a required backup"); @@ -138,7 +162,8 @@ $node1->command_fails_like( $node1->command_fails_like( [ 'pg_combinebackup', $backup1path, $backup3path, $backup2path, - '-o', $resultpath, $mode + '--output' => $resultpath, + $mode, ], qr/starts at LSN.*but expected/, "can't combine backups in the wrong order"); @@ -147,7 +172,8 @@ $node1->command_fails_like( $node1->command_ok( [ 'pg_combinebackup', $backup1path, $backup2path, $backup3path, - '-o', $resultpath, $mode + '--output' => $resultpath, + $mode, ], "can combine 3 matching backups"); rmtree($resultpath); @@ -156,17 +182,18 @@ rmtree($resultpath); my $synthetic12path = $node1->backup_dir . '/synthetic12'; $node1->command_ok( [ - 'pg_combinebackup', $backup1path, $backup2path, '-o', - $synthetic12path, $mode + 'pg_combinebackup', $backup1path, $backup2path, + '--output' => $synthetic12path, + $mode, ], "can combine 2 matching backups"); # Can combine result of previous step with second incremental. $node1->command_ok( [ - 'pg_combinebackup', $synthetic12path, - $backup3path, '-o', - $resultpath, $mode + 'pg_combinebackup', $synthetic12path, $backup3path, + '--output' => $resultpath, + $mode, ], "can combine synthetic backup with later incremental"); rmtree($resultpath); @@ -174,9 +201,9 @@ rmtree($resultpath); # Can't combine result of 1+2 with 2. 
$node1->command_fails_like( [ - 'pg_combinebackup', $synthetic12path, - $backup2path, '-o', - $resultpath, $mode + 'pg_combinebackup', $synthetic12path, $backup2path, + '--output' => $resultpath, + $mode, ], qr/starts at LSN.*but expected/, "can't combine synthetic backup with included incremental"); diff --git a/src/bin/pg_combinebackup/t/006_db_file_copy.pl b/src/bin/pg_combinebackup/t/006_db_file_copy.pl index a125f971487..65dd4e2d460 100644 --- a/src/bin/pg_combinebackup/t/006_db_file_copy.pl +++ b/src/bin/pg_combinebackup/t/006_db_file_copy.pl @@ -29,7 +29,12 @@ EOM # Take a full backup. my $backup1path = $primary->backup_dir . '/backup1'; $primary->command_ok( - [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $backup1path, + '--no-sync', + '--checkpoint' => 'fast' + ], "full backup"); # Now make some database changes. @@ -42,8 +47,11 @@ EOM my $backup2path = $primary->backup_dir . '/backup2'; $primary->command_ok( [ - 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast', - '--incremental', $backup1path . '/backup_manifest' + 'pg_basebackup', + '--pgdata' => $backup2path, + '--no-sync', + '--checkpoint' => 'fast', + '--incremental' => $backup1path . '/backup_manifest' ], "incremental backup"); diff --git a/src/bin/pg_combinebackup/t/007_wal_level_minimal.pl b/src/bin/pg_combinebackup/t/007_wal_level_minimal.pl index 0ac9af7f8b3..be24e055892 100644 --- a/src/bin/pg_combinebackup/t/007_wal_level_minimal.pl +++ b/src/bin/pg_combinebackup/t/007_wal_level_minimal.pl @@ -34,7 +34,12 @@ EOM # Take a full backup. my $backup1path = $node1->backup_dir . '/backup1'; $node1->command_ok( - [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $backup1path, + '--no-sync', + '--checkpoint' => 'fast' + ], "full backup"); # Switch to wal_level=minimal, which also requires max_wal_senders=0 and @@ -63,8 +68,11 @@ $node1->restart; my $backup2path = $node1->backup_dir . 
'/backup2'; $node1->command_fails_like( [ - 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast', - '--incremental', $backup1path . '/backup_manifest' + 'pg_basebackup', + '--pgdata' => $backup2path, + '--no-sync', + '--checkpoint' => 'fast', + '--incremental' => $backup1path . '/backup_manifest' ], qr/WAL summaries are required on timeline 1 from.*are incomplete/, "incremental backup fails"); diff --git a/src/bin/pg_combinebackup/t/008_promote.pl b/src/bin/pg_combinebackup/t/008_promote.pl index 7835364a9b0..732f6397103 100644 --- a/src/bin/pg_combinebackup/t/008_promote.pl +++ b/src/bin/pg_combinebackup/t/008_promote.pl @@ -31,7 +31,12 @@ EOM # Take a full backup. my $backup1path = $node1->backup_dir . '/backup1'; $node1->command_ok( - [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $backup1path, + '--no-sync', + '--checkpoint' => 'fast', + ], "full backup from node1"); # Checkpoint and record LSN after. @@ -70,8 +75,11 @@ EOM my $backup2path = $node1->backup_dir . '/backup2'; $node2->command_ok( [ - 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast', - '--incremental', $backup1path . '/backup_manifest' + 'pg_basebackup', + '--pgdata' => $backup2path, + '--no-sync', + '--checkpoint' => 'fast', + '--incremental' => $backup1path . '/backup_manifest', ], "incremental backup from node2"); diff --git a/src/bin/pg_combinebackup/t/009_no_full_file.pl b/src/bin/pg_combinebackup/t/009_no_full_file.pl index 18218ad7a60..abe9e9a6a81 100644 --- a/src/bin/pg_combinebackup/t/009_no_full_file.pl +++ b/src/bin/pg_combinebackup/t/009_no_full_file.pl @@ -21,15 +21,23 @@ $primary->start; # Take a full backup. my $backup1path = $primary->backup_dir . '/backup1'; $primary->command_ok( - [ 'pg_basebackup', '-D', $backup1path, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $backup1path, + '--no-sync', + '--checkpoint' => 'fast' + ], "full backup"); # Take an incremental backup. 
my $backup2path = $primary->backup_dir . '/backup2'; $primary->command_ok( [ - 'pg_basebackup', '-D', $backup2path, '--no-sync', '-cfast', - '--incremental', $backup1path . '/backup_manifest' + 'pg_basebackup', + '--pgdata' => $backup2path, + '--no-sync', + '--checkpoint' => 'fast', + '--incremental' => $backup1path . '/backup_manifest' ], "incremental backup"); @@ -57,7 +65,10 @@ for my $iname (@filelist) # pg_combinebackup should fail. my $outpath = $primary->backup_dir . '/out'; $primary->command_fails_like( - [ 'pg_combinebackup', $backup1path, $backup2path, '-o', $outpath, ], + [ + 'pg_combinebackup', $backup1path, + $backup2path, '--output' => $outpath, + ], qr/full backup contains unexpected incremental file/, "pg_combinebackup fails"); diff --git a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl index 5f7b5eb96d8..8c86faf3ad5 100644 --- a/src/bin/pg_ctl/t/001_start_stop.pl +++ b/src/bin/pg_ctl/t/001_start_stop.pl @@ -15,10 +15,15 @@ program_help_ok('pg_ctl'); program_version_ok('pg_ctl'); program_options_handling_ok('pg_ctl'); -command_exit_is([ 'pg_ctl', 'start', '-D', "$tempdir/nonexistent" ], +command_exit_is([ 'pg_ctl', 'start', '--pgdata' => "$tempdir/nonexistent" ], 1, 'pg_ctl start with nonexistent directory'); -command_ok([ 'pg_ctl', 'initdb', '-D', "$tempdir/data", '-o', '-N' ], +command_ok( + [ + 'pg_ctl', 'initdb', + '--pgdata' => "$tempdir/data", + '--options' => '--no-sync' + ], 'pg_ctl initdb'); command_ok([ $ENV{PG_REGRESS}, '--config-auth', "$tempdir/data" ], 'configure authentication'); @@ -41,8 +46,9 @@ else } close $conf; my $ctlcmd = [ - 'pg_ctl', 'start', '-D', "$tempdir/data", '-l', - "$PostgreSQL::Test::Utils::log_path/001_start_stop_server.log" + 'pg_ctl', 'start', + '--pgdata' => "$tempdir/data", + '--log' => "$PostgreSQL::Test::Utils::log_path/001_start_stop_server.log" ]; command_like($ctlcmd, qr/done.*server started/s, 'pg_ctl start'); @@ -51,17 +57,23 @@ command_like($ctlcmd, qr/done.*server started/s, 
'pg_ctl start'); # postmaster they start. Waiting more than the 2 seconds slop time allowed # by wait_for_postmaster() prevents that mistake. sleep 3 if ($windows_os); -command_fails([ 'pg_ctl', 'start', '-D', "$tempdir/data" ], +command_fails([ 'pg_ctl', 'start', '--pgdata' => "$tempdir/data" ], 'second pg_ctl start fails'); -command_ok([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ], 'pg_ctl stop'); -command_fails([ 'pg_ctl', 'stop', '-D', "$tempdir/data" ], +command_ok([ 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data" ], + 'pg_ctl stop'); +command_fails([ 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data" ], 'second pg_ctl stop fails'); # Log file for default permission test. The permissions won't be checked on # Windows but we still want to do the restart test. my $logFileName = "$tempdir/data/perm-test-600.log"; -command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-l', $logFileName ], +command_ok( + [ + 'pg_ctl', 'restart', + '--pgdata' => "$tempdir/data", + '--log' => $logFileName + ], 'pg_ctl restart with server not running'); # Permissions on log file should be default @@ -82,23 +94,27 @@ SKIP: skip "group access not supported on Windows", 3 if ($windows_os || $Config::Config{osname} eq 'cygwin'); - system_or_bail 'pg_ctl', 'stop', '-D', "$tempdir/data"; + system_or_bail 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data"; # Change the data dir mode so log file will be created with group read # privileges on the next start chmod_recursive("$tempdir/data", 0750, 0640); command_ok( - [ 'pg_ctl', 'start', '-D', "$tempdir/data", '-l', $logFileName ], + [ + 'pg_ctl', 'start', + '--pgdata' => "$tempdir/data", + '--log' => $logFileName + ], 'start server to check group permissions'); ok(-f $logFileName); ok(check_mode_recursive("$tempdir/data", 0750, 0640)); } -command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data" ], +command_ok([ 'pg_ctl', 'restart', '--pgdata' => "$tempdir/data" ], 'pg_ctl restart with server running'); -system_or_bail 'pg_ctl', 'stop', '-D', 
"$tempdir/data"; +system_or_bail 'pg_ctl', 'stop', '--pgdata' => "$tempdir/data"; done_testing(); diff --git a/src/bin/pg_ctl/t/002_status.pl b/src/bin/pg_ctl/t/002_status.pl index 1a72079827d..346f6919ac6 100644 --- a/src/bin/pg_ctl/t/002_status.pl +++ b/src/bin/pg_ctl/t/002_status.pl @@ -10,20 +10,23 @@ use Test::More; my $tempdir = PostgreSQL::Test::Utils::tempdir; -command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/nonexistent" ], +command_exit_is([ 'pg_ctl', 'status', '--pgdata' => "$tempdir/nonexistent" ], 4, 'pg_ctl status with nonexistent directory'); my $node = PostgreSQL::Test::Cluster->new('main'); $node->init; -command_exit_is([ 'pg_ctl', 'status', '-D', $node->data_dir ], +command_exit_is([ 'pg_ctl', 'status', '--pgdata' => $node->data_dir ], 3, 'pg_ctl status with server not running'); -system_or_bail 'pg_ctl', '-l', "$tempdir/logfile", '-D', - $node->data_dir, '-w', 'start'; -command_exit_is([ 'pg_ctl', 'status', '-D', $node->data_dir ], +system_or_bail( + 'pg_ctl', + '--log' => "$tempdir/logfile", + '--pgdata' => $node->data_dir, + '--wait', 'start'); +command_exit_is([ 'pg_ctl', 'status', '--pgdata' => $node->data_dir ], 0, 'pg_ctl status with server running'); -system_or_bail 'pg_ctl', 'stop', '-D', $node->data_dir; +system_or_bail 'pg_ctl', 'stop', '--pgdata' => $node->data_dir; done_testing(); diff --git a/src/bin/pg_ctl/t/003_promote.pl b/src/bin/pg_ctl/t/003_promote.pl index 78dfa3f2232..43a9bbac2ac 100644 --- a/src/bin/pg_ctl/t/003_promote.pl +++ b/src/bin/pg_ctl/t/003_promote.pl @@ -11,7 +11,7 @@ use Test::More; my $tempdir = PostgreSQL::Test::Utils::tempdir; command_fails_like( - [ 'pg_ctl', '-D', "$tempdir/nonexistent", 'promote' ], + [ 'pg_ctl', '--pgdata' => "$tempdir/nonexistent", 'promote' ], qr/directory .* does not exist/, 'pg_ctl promote with nonexistent directory'); @@ -19,14 +19,14 @@ my $node_primary = PostgreSQL::Test::Cluster->new('primary'); $node_primary->init(allows_streaming => 1); command_fails_like( - [ 'pg_ctl', '-D', 
$node_primary->data_dir, 'promote' ], + [ 'pg_ctl', '--pgdata' => $node_primary->data_dir, 'promote' ], qr/PID file .* does not exist/, 'pg_ctl promote of not running instance fails'); $node_primary->start; command_fails_like( - [ 'pg_ctl', '-D', $node_primary->data_dir, 'promote' ], + [ 'pg_ctl', '--pgdata' => $node_primary->data_dir, 'promote' ], qr/not in standby mode/, 'pg_ctl promote of primary instance fails'); @@ -39,8 +39,13 @@ $node_standby->start; is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'), 't', 'standby is in recovery'); -command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, '-W', 'promote' ], - 'pg_ctl -W promote of standby runs'); +command_ok( + [ + 'pg_ctl', + '--pgdata' => $node_standby->data_dir, + '--no-wait', 'promote' + ], + 'pg_ctl --no-wait promote of standby runs'); ok( $node_standby->poll_query_until( 'postgres', 'SELECT NOT pg_is_in_recovery()'), @@ -55,7 +60,7 @@ $node_standby->start; is($node_standby->safe_psql('postgres', 'SELECT pg_is_in_recovery()'), 't', 'standby is in recovery'); -command_ok([ 'pg_ctl', '-D', $node_standby->data_dir, 'promote' ], +command_ok([ 'pg_ctl', '--pgdata' => $node_standby->data_dir, 'promote' ], 'pg_ctl promote of standby runs'); # no wait here diff --git a/src/bin/pg_dump/t/002_pg_dump.pl b/src/bin/pg_dump/t/002_pg_dump.pl index bf65d44b942..a643a73270e 100644 --- a/src/bin/pg_dump/t/002_pg_dump.pl +++ b/src/bin/pg_dump/t/002_pg_dump.pl @@ -61,18 +61,19 @@ my $supports_zstd = check_pg_config("#define USE_ZSTD 1"); my %pgdump_runs = ( binary_upgrade => { dump_cmd => [ - 'pg_dump', - '--no-sync', - '--format=custom', - "--file=$tempdir/binary_upgrade.dump", - '-w', + 'pg_dump', '--no-sync', + '--format' => 'custom', + '--file' => "$tempdir/binary_upgrade.dump", + '--no-password', '--schema-only', '--binary-upgrade', - '-d', 'postgres', # alternative way to specify database + '--dbname' => 'postgres', # alternative way to specify database ], restore_cmd => [ - 'pg_restore', '-Fc', 
'--verbose', - "--file=$tempdir/binary_upgrade.sql", + 'pg_restore', + '--format' => 'custom', + '--verbose', + '--file' => "$tempdir/binary_upgrade.sql", "$tempdir/binary_upgrade.dump", ], }, @@ -82,18 +83,21 @@ my %pgdump_runs = ( test_key => 'compression', compile_option => 'gzip', dump_cmd => [ - 'pg_dump', '--format=custom', - '--compress=1', "--file=$tempdir/compression_gzip_custom.dump", + 'pg_dump', + '--format' => 'custom', + '--compress' => '1', + '--file' => "$tempdir/compression_gzip_custom.dump", 'postgres', ], restore_cmd => [ 'pg_restore', - "--file=$tempdir/compression_gzip_custom.sql", + '--file' => "$tempdir/compression_gzip_custom.sql", "$tempdir/compression_gzip_custom.dump", ], command_like => { command => [ - 'pg_restore', '-l', "$tempdir/compression_gzip_custom.dump", + 'pg_restore', '--list', + "$tempdir/compression_gzip_custom.dump", ], expected => qr/Compression: gzip/, name => 'data content is gzip-compressed' @@ -105,9 +109,12 @@ my %pgdump_runs = ( test_key => 'compression', compile_option => 'gzip', dump_cmd => [ - 'pg_dump', '--jobs=2', - '--format=directory', '--compress=gzip:1', - "--file=$tempdir/compression_gzip_dir", 'postgres', + 'pg_dump', + '--jobs' => '2', + '--format' => 'directory', + '--compress' => 'gzip:1', + '--file' => "$tempdir/compression_gzip_dir", + 'postgres', ], # Give coverage for manually compressed blobs.toc files during # restore. 
@@ -121,8 +128,9 @@ my %pgdump_runs = ( "$tempdir/compression_gzip_dir/*.dat.gz", ], restore_cmd => [ - 'pg_restore', '--jobs=2', - "--file=$tempdir/compression_gzip_dir.sql", + 'pg_restore', + '--jobs' => '2', + '--file' => "$tempdir/compression_gzip_dir.sql", "$tempdir/compression_gzip_dir", ], }, @@ -131,8 +139,11 @@ my %pgdump_runs = ( test_key => 'compression', compile_option => 'gzip', dump_cmd => [ - 'pg_dump', '--format=plain', '-Z1', - "--file=$tempdir/compression_gzip_plain.sql.gz", 'postgres', + 'pg_dump', + '--format' => 'plain', + '--compress' => '1', + '--file' => "$tempdir/compression_gzip_plain.sql.gz", + 'postgres', ], # Decompress the generated file to run through the tests. compress_cmd => { @@ -146,18 +157,22 @@ my %pgdump_runs = ( test_key => 'compression', compile_option => 'lz4', dump_cmd => [ - 'pg_dump', '--format=custom', - '--compress=lz4', "--file=$tempdir/compression_lz4_custom.dump", + 'pg_dump', + '--format' => 'custom', + '--compress' => 'lz4', + '--file' => "$tempdir/compression_lz4_custom.dump", 'postgres', ], restore_cmd => [ 'pg_restore', - "--file=$tempdir/compression_lz4_custom.sql", + '--file' => "$tempdir/compression_lz4_custom.sql", "$tempdir/compression_lz4_custom.dump", ], command_like => { - command => - [ 'pg_restore', '-l', "$tempdir/compression_lz4_custom.dump", ], + command => [ + 'pg_restore', '--list', + "$tempdir/compression_lz4_custom.dump", + ], expected => qr/Compression: lz4/, name => 'data content is lz4 compressed' }, @@ -168,9 +183,12 @@ my %pgdump_runs = ( test_key => 'compression', compile_option => 'lz4', dump_cmd => [ - 'pg_dump', '--jobs=2', - '--format=directory', '--compress=lz4:1', - "--file=$tempdir/compression_lz4_dir", 'postgres', + 'pg_dump', + '--jobs' => '2', + '--format' => 'directory', + '--compress' => 'lz4:1', + '--file' => "$tempdir/compression_lz4_dir", + 'postgres', ], # Verify that data files were compressed glob_patterns => [ @@ -178,8 +196,9 @@ my %pgdump_runs = ( 
"$tempdir/compression_lz4_dir/*.dat.lz4", ], restore_cmd => [ - 'pg_restore', '--jobs=2', - "--file=$tempdir/compression_lz4_dir.sql", + 'pg_restore', + '--jobs' => '2', + '--file' => "$tempdir/compression_lz4_dir.sql", "$tempdir/compression_lz4_dir", ], }, @@ -188,8 +207,11 @@ my %pgdump_runs = ( test_key => 'compression', compile_option => 'lz4', dump_cmd => [ - 'pg_dump', '--format=plain', '--compress=lz4', - "--file=$tempdir/compression_lz4_plain.sql.lz4", 'postgres', + 'pg_dump', + '--format' => 'plain', + '--compress' => 'lz4', + '--file' => "$tempdir/compression_lz4_plain.sql.lz4", + 'postgres', ], # Decompress the generated file to run through the tests. compress_cmd => { @@ -206,18 +228,21 @@ my %pgdump_runs = ( test_key => 'compression', compile_option => 'zstd', dump_cmd => [ - 'pg_dump', '--format=custom', - '--compress=zstd', "--file=$tempdir/compression_zstd_custom.dump", + 'pg_dump', + '--format' => 'custom', + '--compress' => 'zstd', + '--file' => "$tempdir/compression_zstd_custom.dump", 'postgres', ], restore_cmd => [ 'pg_restore', - "--file=$tempdir/compression_zstd_custom.sql", + '--file' => "$tempdir/compression_zstd_custom.sql", "$tempdir/compression_zstd_custom.dump", ], command_like => { command => [ - 'pg_restore', '-l', "$tempdir/compression_zstd_custom.dump", + 'pg_restore', '--list', + "$tempdir/compression_zstd_custom.dump", ], expected => qr/Compression: zstd/, name => 'data content is zstd compressed' @@ -228,9 +253,12 @@ my %pgdump_runs = ( test_key => 'compression', compile_option => 'zstd', dump_cmd => [ - 'pg_dump', '--jobs=2', - '--format=directory', '--compress=zstd:1', - "--file=$tempdir/compression_zstd_dir", 'postgres', + 'pg_dump', + '--jobs' => '2', + '--format' => 'directory', + '--compress' => 'zstd:1', + '--file' => "$tempdir/compression_zstd_dir", + 'postgres', ], # Give coverage for manually compressed blobs.toc files during # restore. 
@@ -247,8 +275,9 @@ my %pgdump_runs = ( "$tempdir/compression_zstd_dir/*.dat.zst", ], restore_cmd => [ - 'pg_restore', '--jobs=2', - "--file=$tempdir/compression_zstd_dir.sql", + 'pg_restore', + '--jobs' => '2', + '--file' => "$tempdir/compression_zstd_dir.sql", "$tempdir/compression_zstd_dir", ], }, @@ -258,8 +287,11 @@ my %pgdump_runs = ( test_key => 'compression', compile_option => 'zstd', dump_cmd => [ - 'pg_dump', '--format=plain', '--compress=zstd:long', - "--file=$tempdir/compression_zstd_plain.sql.zst", 'postgres', + 'pg_dump', + '--format' => 'plain', + '--compress' => 'zstd:long', + '--file' => "$tempdir/compression_zstd_plain.sql.zst", + 'postgres', ], # Decompress the generated file to run through the tests. compress_cmd => { @@ -274,81 +306,80 @@ my %pgdump_runs = ( clean => { dump_cmd => [ - 'pg_dump', - '--no-sync', - "--file=$tempdir/clean.sql", - '-c', - '-d', 'postgres', # alternative way to specify database + 'pg_dump', '--no-sync', + '--file' => "$tempdir/clean.sql", + '--clean', + '--dbname' => 'postgres', # alternative way to specify database ], }, clean_if_exists => { dump_cmd => [ - 'pg_dump', - '--no-sync', - "--file=$tempdir/clean_if_exists.sql", - '-c', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/clean_if_exists.sql", + '--clean', '--if-exists', - '--encoding=UTF8', # no-op, just tests that option is accepted + '--encoding' => 'UTF8', # no-op, just for testing 'postgres', ], }, column_inserts => { dump_cmd => [ 'pg_dump', '--no-sync', - "--file=$tempdir/column_inserts.sql", '-a', + '--file' => "$tempdir/column_inserts.sql", + '--data-only', '--column-inserts', 'postgres', ], }, createdb => { dump_cmd => [ - 'pg_dump', - '--no-sync', - "--file=$tempdir/createdb.sql", - '-C', - '-R', # no-op, just for testing - '-v', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/createdb.sql", + '--create', + '--no-reconnect', # no-op, just for testing + '--verbose', 'postgres', ], }, data_only => { dump_cmd => [ - 'pg_dump', - '--no-sync', - 
"--file=$tempdir/data_only.sql", - '-a', - '--superuser=test_superuser', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/data_only.sql", + '--data-only', + '--superuser' => 'test_superuser', '--disable-triggers', - '-v', # no-op, just make sure it works + '--verbose', # no-op, just make sure it works 'postgres', ], }, defaults => { dump_cmd => [ 'pg_dump', '--no-sync', - '-f', "$tempdir/defaults.sql", + '--file' => "$tempdir/defaults.sql", 'postgres', ], }, defaults_no_public => { database => 'regress_pg_dump_test', dump_cmd => [ - 'pg_dump', '--no-sync', '-f', "$tempdir/defaults_no_public.sql", + 'pg_dump', '--no-sync', + '--file' => "$tempdir/defaults_no_public.sql", 'regress_pg_dump_test', ], }, defaults_no_public_clean => { database => 'regress_pg_dump_test', dump_cmd => [ - 'pg_dump', '--no-sync', '-c', '-f', - "$tempdir/defaults_no_public_clean.sql", + 'pg_dump', '--no-sync', + '--clean', + '--file' => "$tempdir/defaults_no_public_clean.sql", 'regress_pg_dump_test', ], }, defaults_public_owner => { database => 'regress_public_owner', dump_cmd => [ - 'pg_dump', '--no-sync', '-f', - "$tempdir/defaults_public_owner.sql", + 'pg_dump', '--no-sync', + '--file' => "$tempdir/defaults_public_owner.sql", 'regress_public_owner', ], }, @@ -360,17 +391,22 @@ my %pgdump_runs = ( defaults_custom_format => { test_key => 'defaults', dump_cmd => [ - 'pg_dump', '-Fc', - "--file=$tempdir/defaults_custom_format.dump", 'postgres', + 'pg_dump', + '--format' => 'custom', + '--file' => "$tempdir/defaults_custom_format.dump", + 'postgres', ], restore_cmd => [ - 'pg_restore', '-Fc', - "--file=$tempdir/defaults_custom_format.sql", + 'pg_restore', + '--format' => 'custom', + '--file' => "$tempdir/defaults_custom_format.sql", "$tempdir/defaults_custom_format.dump", ], command_like => { - command => - [ 'pg_restore', '-l', "$tempdir/defaults_custom_format.dump", ], + command => [ + 'pg_restore', '--list', + "$tempdir/defaults_custom_format.dump", + ], expected => $supports_gzip ? 
qr/Compression: gzip/ : qr/Compression: none/, @@ -385,17 +421,20 @@ my %pgdump_runs = ( defaults_dir_format => { test_key => 'defaults', dump_cmd => [ - 'pg_dump', '-Fd', - "--file=$tempdir/defaults_dir_format", 'postgres', + 'pg_dump', + '--format' => 'directory', + '--file' => "$tempdir/defaults_dir_format", + 'postgres', ], restore_cmd => [ - 'pg_restore', '-Fd', - "--file=$tempdir/defaults_dir_format.sql", + 'pg_restore', + '--format' => 'directory', + '--file' => "$tempdir/defaults_dir_format.sql", "$tempdir/defaults_dir_format", ], command_like => { command => - [ 'pg_restore', '-l', "$tempdir/defaults_dir_format", ], + [ 'pg_restore', '--list', "$tempdir/defaults_dir_format", ], expected => $supports_gzip ? qr/Compression: gzip/ : qr/Compression: none/, name => 'data content is gzip-compressed by default', @@ -412,12 +451,15 @@ my %pgdump_runs = ( defaults_parallel => { test_key => 'defaults', dump_cmd => [ - 'pg_dump', '-Fd', '-j2', "--file=$tempdir/defaults_parallel", + 'pg_dump', + '--format' => 'directory', + '--jobs' => 2, + '--file' => "$tempdir/defaults_parallel", 'postgres', ], restore_cmd => [ 'pg_restore', - "--file=$tempdir/defaults_parallel.sql", + '--file' => "$tempdir/defaults_parallel.sql", "$tempdir/defaults_parallel", ], }, @@ -426,55 +468,56 @@ my %pgdump_runs = ( defaults_tar_format => { test_key => 'defaults', dump_cmd => [ - 'pg_dump', '-Ft', - "--file=$tempdir/defaults_tar_format.tar", 'postgres', + 'pg_dump', + '--format' => 'tar', + '--file' => "$tempdir/defaults_tar_format.tar", + 'postgres', ], restore_cmd => [ 'pg_restore', - '--format=tar', - "--file=$tempdir/defaults_tar_format.sql", + '--format' => 'tar', + '--file' => "$tempdir/defaults_tar_format.sql", "$tempdir/defaults_tar_format.tar", ], }, exclude_dump_test_schema => { dump_cmd => [ 'pg_dump', '--no-sync', - "--file=$tempdir/exclude_dump_test_schema.sql", - '--exclude-schema=dump_test', 'postgres', + '--file' => "$tempdir/exclude_dump_test_schema.sql", + 
'--exclude-schema' => 'dump_test', + 'postgres', ], }, exclude_test_table => { dump_cmd => [ 'pg_dump', '--no-sync', - "--file=$tempdir/exclude_test_table.sql", - '--exclude-table=dump_test.test_table', 'postgres', + '--file' => "$tempdir/exclude_test_table.sql", + '--exclude-table' => 'dump_test.test_table', + 'postgres', ], }, exclude_measurement => { dump_cmd => [ - 'pg_dump', - '--no-sync', - "--file=$tempdir/exclude_measurement.sql", - '--exclude-table-and-children=dump_test.measurement', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/exclude_measurement.sql", + '--exclude-table-and-children' => 'dump_test.measurement', 'postgres', ], }, exclude_measurement_data => { dump_cmd => [ - 'pg_dump', - '--no-sync', - "--file=$tempdir/exclude_measurement_data.sql", - '--exclude-table-data-and-children=dump_test.measurement', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/exclude_measurement_data.sql", + '--exclude-table-data-and-children' => 'dump_test.measurement', '--no-unlogged-table-data', 'postgres', ], }, exclude_test_table_data => { dump_cmd => [ - 'pg_dump', - '--no-sync', - "--file=$tempdir/exclude_test_table_data.sql", - '--exclude-table-data=dump_test.test_table', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/exclude_test_table_data.sql", + '--exclude-table-data' => 'dump_test.test_table', '--no-unlogged-table-data', 'postgres', ], @@ -482,168 +525,190 @@ my %pgdump_runs = ( inserts => { dump_cmd => [ 'pg_dump', '--no-sync', - "--file=$tempdir/inserts.sql", '-a', + '--file' => "$tempdir/inserts.sql", + '--data-only', '--inserts', 'postgres', ], }, pg_dumpall_globals => { dump_cmd => [ - 'pg_dumpall', '-v', "--file=$tempdir/pg_dumpall_globals.sql", - '-g', '--no-sync', + 'pg_dumpall', + '--verbose', + '--file' => "$tempdir/pg_dumpall_globals.sql", + '--globals-only', + '--no-sync', ], }, pg_dumpall_globals_clean => { dump_cmd => [ - 'pg_dumpall', "--file=$tempdir/pg_dumpall_globals_clean.sql", - '-g', '-c', '--no-sync', + 'pg_dumpall', + '--file' 
=> "$tempdir/pg_dumpall_globals_clean.sql", + '--globals-only', + '--clean', + '--no-sync', ], }, pg_dumpall_dbprivs => { dump_cmd => [ 'pg_dumpall', '--no-sync', - "--file=$tempdir/pg_dumpall_dbprivs.sql", + '--file' => "$tempdir/pg_dumpall_dbprivs.sql", ], }, pg_dumpall_exclude => { dump_cmd => [ - 'pg_dumpall', '-v', "--file=$tempdir/pg_dumpall_exclude.sql", - '--exclude-database', '*dump_test*', '--no-sync', + 'pg_dumpall', + '--verbose', + '--file' => "$tempdir/pg_dumpall_exclude.sql", + '--exclude-database' => '*dump_test*', + '--no-sync', ], }, no_toast_compression => { dump_cmd => [ 'pg_dump', '--no-sync', - "--file=$tempdir/no_toast_compression.sql", - '--no-toast-compression', 'postgres', + '--file' => "$tempdir/no_toast_compression.sql", + '--no-toast-compression', + 'postgres', ], }, no_large_objects => { dump_cmd => [ - 'pg_dump', '--no-sync', "--file=$tempdir/no_large_objects.sql", - '-B', 'postgres', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/no_large_objects.sql", + '--no-large-objects', + 'postgres', ], }, no_privs => { dump_cmd => [ 'pg_dump', '--no-sync', - "--file=$tempdir/no_privs.sql", '-x', + '--file' => "$tempdir/no_privs.sql", + '--no-privileges', 'postgres', ], }, no_owner => { dump_cmd => [ 'pg_dump', '--no-sync', - "--file=$tempdir/no_owner.sql", '-O', + '--file' => "$tempdir/no_owner.sql", + '--no-owner', 'postgres', ], }, no_table_access_method => { dump_cmd => [ 'pg_dump', '--no-sync', - "--file=$tempdir/no_table_access_method.sql", - '--no-table-access-method', 'postgres', + '--file' => "$tempdir/no_table_access_method.sql", + '--no-table-access-method', + 'postgres', ], }, only_dump_test_schema => { dump_cmd => [ 'pg_dump', '--no-sync', - "--file=$tempdir/only_dump_test_schema.sql", - '--schema=dump_test', 'postgres', + '--file' => "$tempdir/only_dump_test_schema.sql", + '--schema' => 'dump_test', + 'postgres', ], }, only_dump_test_table => { dump_cmd => [ - 'pg_dump', - '--no-sync', - 
"--file=$tempdir/only_dump_test_table.sql", - '--table=dump_test.test_table', - '--lock-wait-timeout=' - . (1000 * $PostgreSQL::Test::Utils::timeout_default), + 'pg_dump', '--no-sync', + '--file' => "$tempdir/only_dump_test_table.sql", + '--table' => 'dump_test.test_table', + '--lock-wait-timeout' => + (1000 * $PostgreSQL::Test::Utils::timeout_default), 'postgres', ], }, only_dump_measurement => { dump_cmd => [ - 'pg_dump', - '--no-sync', - "--file=$tempdir/only_dump_measurement.sql", - '--table-and-children=dump_test.measurement', - '--lock-wait-timeout=' - . (1000 * $PostgreSQL::Test::Utils::timeout_default), + 'pg_dump', '--no-sync', + '--file' => "$tempdir/only_dump_measurement.sql", + '--table-and-children' => 'dump_test.measurement', + '--lock-wait-timeout' => + (1000 * $PostgreSQL::Test::Utils::timeout_default), 'postgres', ], }, role => { dump_cmd => [ - 'pg_dump', - '--no-sync', - "--file=$tempdir/role.sql", - '--role=regress_dump_test_role', - '--schema=dump_test_second_schema', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/role.sql", + '--role' => 'regress_dump_test_role', + '--schema' => 'dump_test_second_schema', 'postgres', ], }, role_parallel => { test_key => 'role', dump_cmd => [ - 'pg_dump', - '--no-sync', - '--format=directory', - '--jobs=2', - "--file=$tempdir/role_parallel", - '--role=regress_dump_test_role', - '--schema=dump_test_second_schema', + 'pg_dump', '--no-sync', + '--format' => 'directory', + '--jobs' => '2', + '--file' => "$tempdir/role_parallel", + '--role' => 'regress_dump_test_role', + '--schema' => 'dump_test_second_schema', 'postgres', ], restore_cmd => [ - 'pg_restore', "--file=$tempdir/role_parallel.sql", + 'pg_restore', + '--file' => "$tempdir/role_parallel.sql", "$tempdir/role_parallel", ], }, rows_per_insert => { dump_cmd => [ - 'pg_dump', - '--no-sync', - "--file=$tempdir/rows_per_insert.sql", - '-a', - '--rows-per-insert=4', - '--table=dump_test.test_table', - '--table=dump_test.test_fourth_table', + 'pg_dump', 
'--no-sync', + '--file' => "$tempdir/rows_per_insert.sql", + '--data-only', + '--rows-per-insert' => '4', + '--table' => 'dump_test.test_table', + '--table' => 'dump_test.test_fourth_table', 'postgres', ], }, schema_only => { dump_cmd => [ - 'pg_dump', '--format=plain', - "--file=$tempdir/schema_only.sql", '--no-sync', - '-s', 'postgres', + 'pg_dump', '--no-sync', + '--format' => 'plain', + '--file' => "$tempdir/schema_only.sql", + '--schema-only', + 'postgres', ], }, section_pre_data => { dump_cmd => [ - 'pg_dump', "--file=$tempdir/section_pre_data.sql", - '--section=pre-data', '--no-sync', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/section_pre_data.sql", + '--section' => 'pre-data', 'postgres', ], }, section_data => { dump_cmd => [ - 'pg_dump', "--file=$tempdir/section_data.sql", - '--section=data', '--no-sync', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/section_data.sql", + '--section' => 'data', 'postgres', ], }, section_post_data => { dump_cmd => [ - 'pg_dump', "--file=$tempdir/section_post_data.sql", - '--section=post-data', '--no-sync', 'postgres', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/section_post_data.sql", + '--section' => 'post-data', + 'postgres', ], }, test_schema_plus_large_objects => { dump_cmd => [ - 'pg_dump', "--file=$tempdir/test_schema_plus_large_objects.sql", - - '--schema=dump_test', '-b', '-B', '--no-sync', 'postgres', + 'pg_dump', '--no-sync', + '--file' => "$tempdir/test_schema_plus_large_objects.sql", + '--schema' => 'dump_test', + '--large-objects', + '--no-large-objects', + 'postgres', ], },); @@ -4732,7 +4797,7 @@ foreach my $db (sort keys %create_sql) # Test connecting to a non-existent database command_fails_like( - [ 'pg_dump', '-p', "$port", 'qqq' ], + [ 'pg_dump', '--port' => $port, 'qqq' ], qr/pg_dump: error: connection to server .* failed: FATAL: database "qqq" does not exist/, 'connecting to a non-existent database'); @@ -4740,7 +4805,7 @@ command_fails_like( # Test connecting to an invalid database 
$node->command_fails_like( - [ 'pg_dump', '-d', 'regression_invalid' ], + [ 'pg_dump', '--dbname' => 'regression_invalid' ], qr/pg_dump: error: connection to server .* failed: FATAL: cannot connect to invalid database "regression_invalid"/, 'connecting to an invalid database'); @@ -4748,7 +4813,7 @@ $node->command_fails_like( # Test connecting with an unprivileged user command_fails_like( - [ 'pg_dump', '-p', "$port", '--role=regress_dump_test_role' ], + [ 'pg_dump', '--port' => $port, '--role' => 'regress_dump_test_role' ], qr/\Qpg_dump: error: query failed: ERROR: permission denied for\E/, 'connecting with an unprivileged user'); @@ -4756,22 +4821,32 @@ command_fails_like( # Test dumping a non-existent schema, table, and patterns with --strict-names command_fails_like( - [ 'pg_dump', '-p', "$port", '-n', 'nonexistent' ], + [ 'pg_dump', '--port' => $port, '--schema' => 'nonexistent' ], qr/\Qpg_dump: error: no matching schemas were found\E/, 'dumping a non-existent schema'); command_fails_like( - [ 'pg_dump', '-p', "$port", '-t', 'nonexistent' ], + [ 'pg_dump', '--port' => $port, '--table' => 'nonexistent' ], qr/\Qpg_dump: error: no matching tables were found\E/, 'dumping a non-existent table'); command_fails_like( - [ 'pg_dump', '-p', "$port", '--strict-names', '-n', 'nonexistent*' ], + [ + 'pg_dump', + '--port' => $port, + '--strict-names', + '--schema' => 'nonexistent*' + ], qr/\Qpg_dump: error: no matching schemas were found for pattern\E/, 'no matching schemas'); command_fails_like( - [ 'pg_dump', '-p', "$port", '--strict-names', '-t', 'nonexistent*' ], + [ + 'pg_dump', + '--port' => $port, + '--strict-names', + '--table' => 'nonexistent*' + ], qr/\Qpg_dump: error: no matching tables were found for pattern\E/, 'no matching tables'); @@ -4779,26 +4854,31 @@ command_fails_like( # Test invalid multipart database names $node->command_fails_like( - [ 'pg_dumpall', '--exclude-database', '.' ], + [ 'pg_dumpall', '--exclude-database' => '.' 
], qr/pg_dumpall: error: improper qualified name \(too many dotted names\): \./, 'pg_dumpall: option --exclude-database rejects multipart pattern "."'); $node->command_fails_like( - [ 'pg_dumpall', '--exclude-database', 'myhost.mydb' ], + [ 'pg_dumpall', '--exclude-database' => 'myhost.mydb' ], qr/pg_dumpall: error: improper qualified name \(too many dotted names\): myhost\.mydb/, 'pg_dumpall: option --exclude-database rejects multipart database names'); ############################################################## # Test dumping pg_catalog (for research -- cannot be reloaded) -$node->command_ok([ 'pg_dump', '-p', "$port", '-n', 'pg_catalog' ], +$node->command_ok( + [ 'pg_dump', '--port' => $port, '--schema' => 'pg_catalog' ], 'pg_dump: option -n pg_catalog'); ######################################### # Test valid database exclusion patterns $node->command_ok( - [ 'pg_dumpall', '-p', "$port", '--exclude-database', '"myhost.mydb"' ], + [ + 'pg_dumpall', + '--port' => $port, + '--exclude-database' => '"myhost.mydb"' + ], 'pg_dumpall: option --exclude-database handles database names with embedded dots' ); @@ -4806,28 +4886,28 @@ $node->command_ok( # Test invalid multipart schema names $node->command_fails_like( - [ 'pg_dump', '--schema', 'myhost.mydb.myschema' ], + [ 'pg_dump', '--schema' => 'myhost.mydb.myschema' ], qr/pg_dump: error: improper qualified name \(too many dotted names\): myhost\.mydb\.myschema/, 'pg_dump: option --schema rejects three-part schema names'); $node->command_fails_like( - [ 'pg_dump', '--schema', 'otherdb.myschema' ], + [ 'pg_dump', '--schema' => 'otherdb.myschema' ], qr/pg_dump: error: cross-database references are not implemented: otherdb\.myschema/, 'pg_dump: option --schema rejects cross-database multipart schema names'); $node->command_fails_like( - [ 'pg_dump', '--schema', '.' ], + [ 'pg_dump', '--schema' => '.' 
], qr/pg_dump: error: cross-database references are not implemented: \./, 'pg_dump: option --schema rejects degenerate two-part schema name: "."'); $node->command_fails_like( - [ 'pg_dump', '--schema', '"some.other.db".myschema' ], + [ 'pg_dump', '--schema' => '"some.other.db".myschema' ], qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.myschema/, 'pg_dump: option --schema rejects cross-database multipart schema names with embedded dots' ); $node->command_fails_like( - [ 'pg_dump', '--schema', '..' ], + [ 'pg_dump', '--schema' => '..' ], qr/pg_dump: error: improper qualified name \(too many dotted names\): \.\./, 'pg_dump: option --schema rejects degenerate three-part schema name: ".."' ); @@ -4836,19 +4916,20 @@ $node->command_fails_like( # Test invalid multipart relation names $node->command_fails_like( - [ 'pg_dump', '--table', 'myhost.mydb.myschema.mytable' ], + [ 'pg_dump', '--table' => 'myhost.mydb.myschema.mytable' ], qr/pg_dump: error: improper relation name \(too many dotted names\): myhost\.mydb\.myschema\.mytable/, 'pg_dump: option --table rejects four-part table names'); $node->command_fails_like( - [ 'pg_dump', '--table', 'otherdb.pg_catalog.pg_class' ], + [ 'pg_dump', '--table' => 'otherdb.pg_catalog.pg_class' ], qr/pg_dump: error: cross-database references are not implemented: otherdb\.pg_catalog\.pg_class/, 'pg_dump: option --table rejects cross-database three part table names'); command_fails_like( [ - 'pg_dump', '-p', "$port", '--table', - '"some.other.db".pg_catalog.pg_class' + 'pg_dump', + '--port' => $port, + '--table' => '"some.other.db".pg_catalog.pg_class' ], qr/pg_dump: error: cross-database references are not implemented: "some\.other\.db"\.pg_catalog\.pg_class/, 'pg_dump: option --table rejects cross-database three part table names with embedded dots' diff --git a/src/bin/pg_dump/t/003_pg_dump_with_server.pl b/src/bin/pg_dump/t/003_pg_dump_with_server.pl index 929a57e1e59..8dc014ed6ed 100644 --- 
a/src/bin/pg_dump/t/003_pg_dump_with_server.pl +++ b/src/bin/pg_dump/t/003_pg_dump_with_server.pl @@ -28,12 +28,23 @@ $node->safe_psql('postgres', "CREATE FOREIGN TABLE t0 (a int) SERVER s0"); $node->safe_psql('postgres', "CREATE FOREIGN TABLE t1 (a int) SERVER s1"); command_fails_like( - [ "pg_dump", '-p', $port, '--include-foreign-data=s0', 'postgres' ], + [ + "pg_dump", + '--port' => $port, + '--include-foreign-data' => 's0', + 'postgres' + ], qr/foreign-data wrapper \"dummy\" has no handler\r?\npg_dump: detail: Query was: .*t0/, "correctly fails to dump a foreign table from a dummy FDW"); command_ok( - [ "pg_dump", '-p', $port, '-a', '--include-foreign-data=s2', 'postgres' ], + [ + "pg_dump", + '--port' => $port, + '--data-only', + '--include-foreign-data' => 's2', + 'postgres' + ], "dump foreign server with no tables"); done_testing(); diff --git a/src/bin/pg_dump/t/004_pg_dump_parallel.pl b/src/bin/pg_dump/t/004_pg_dump_parallel.pl index 3215c0c7e93..fcbe74ec8e9 100644 --- a/src/bin/pg_dump/t/004_pg_dump_parallel.pl +++ b/src/bin/pg_dump/t/004_pg_dump_parallel.pl @@ -48,33 +48,42 @@ insert into tht select (x%10)::text::digit, x from generate_series(1,1000) x; $node->command_ok( [ - 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', "$backupdir/dump1", - $node->connstr($dbname1) + 'pg_dump', + '--format' => 'directory', + '--no-sync', + '--jobs' => 2, + '--file' => "$backupdir/dump1", + $node->connstr($dbname1), ], 'parallel dump'); $node->command_ok( [ - 'pg_restore', '-v', - '-d', $node->connstr($dbname2), - '-j3', "$backupdir/dump1" + 'pg_restore', '--verbose', + '--dbname' => $node->connstr($dbname2), + '--jobs' => 3, + "$backupdir/dump1", ], 'parallel restore'); $node->command_ok( [ - 'pg_dump', '-Fd', - '--no-sync', '-j2', - '-f', "$backupdir/dump2", - '--inserts', $node->connstr($dbname1) + 'pg_dump', + '--format' => 'directory', + '--no-sync', + '--jobs' => 2, + '--file' => "$backupdir/dump2", + '--inserts', + $node->connstr($dbname1), ], 'parallel dump as 
inserts'); $node->command_ok( [ - 'pg_restore', '-v', - '-d', $node->connstr($dbname3), - '-j3', "$backupdir/dump2" + 'pg_restore', '--verbose', + '--dbname' => $node->connstr($dbname3), + '--jobs' => 3, + "$backupdir/dump2", ], 'parallel restore as inserts'); diff --git a/src/bin/pg_dump/t/005_pg_dump_filterfile.pl b/src/bin/pg_dump/t/005_pg_dump_filterfile.pl index 3568a246b23..f05e8a20e05 100644 --- a/src/bin/pg_dump/t/005_pg_dump_filterfile.pl +++ b/src/bin/pg_dump/t/005_pg_dump_filterfile.pl @@ -90,8 +90,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "filter file without patterns"); @@ -117,8 +120,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "dump tables with filter patterns as well as comments and whitespace"); @@ -143,8 +149,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "filter file without patterns"); @@ -162,8 +171,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "dump tables with exclusion of a single table"); @@ -183,8 +195,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' 
], "dump tables with wildcard in pattern"); @@ -205,8 +220,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "dump tables with multiline names requiring quoting"); @@ -223,8 +241,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "dump tables with filter"); @@ -241,8 +262,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "exclude the public schema"); @@ -263,9 +287,12 @@ close $alt_inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", - "--filter=$tempdir/inputfile2.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + '--filter' => "$tempdir/inputfile2.txt", + 'postgres' ], "exclude the public schema with multiple filters"); @@ -284,8 +311,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "dump tables with filter"); @@ -301,8 +331,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "dump tables with filter"); @@ -321,8 +354,11 @@ close $inputfile; command_fails_like( [ - 
'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], qr/pg_dump: error: no matching foreign servers were found for pattern/, "dump nonexisting foreign server"); @@ -334,8 +370,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "dump foreign_data with filter"); @@ -350,8 +389,11 @@ close $inputfile; command_fails_like( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], qr/exclude filter for "foreign data" is not allowed/, "erroneously exclude foreign server"); @@ -367,8 +409,11 @@ close $inputfile; command_fails_like( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], qr/invalid filter command/, "invalid syntax: incorrect filter command"); @@ -381,8 +426,11 @@ close $inputfile; command_fails_like( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], qr/unsupported filter object type: "xxx"/, "invalid syntax: invalid object type specified, should be table, schema, foreign_data or data" @@ -396,8 +444,11 @@ close $inputfile; command_fails_like( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' 
], qr/missing object name/, "invalid syntax: missing object identifier pattern"); @@ -410,8 +461,11 @@ close $inputfile; command_fails_like( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], qr/no matching tables were found/, "invalid syntax: extra content after object identifier pattern"); @@ -427,8 +481,10 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", '--strict-names', 'postgres' ], "strict names with matching pattern"); @@ -445,8 +501,10 @@ close $inputfile; command_fails_like( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", '--strict-names', 'postgres' ], qr/no matching tables were found/, @@ -464,8 +522,10 @@ close $inputfile; command_ok( [ - 'pg_dumpall', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt" + 'pg_dumpall', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt" ], "dump tables with exclusion of a database"); @@ -478,8 +538,10 @@ ok($dump =~ qr/^\\connect template1/m, "database template1 is dumped"); # --globals-only with exclusions command_fails_like( [ - 'pg_dumpall', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", + 'pg_dumpall', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", '--globals-only' ], qr/\Qpg_dumpall: error: option --exclude-database cannot be used together with -g\/--globals-only\E/, @@ -494,8 +556,10 @@ close $inputfile; command_fails_like( [ - 'pg_dumpall', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt" + 'pg_dumpall', + '--port' => $port, 
+ '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt" ], qr/invalid filter command/, "invalid syntax: incorrect filter command"); @@ -508,8 +572,10 @@ close $inputfile; command_fails_like( [ - 'pg_dumpall', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt" + 'pg_dumpall', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt" ], qr/unsupported filter object type: "xxx"/, "invalid syntax: exclusion of non-existing object type"); @@ -521,8 +587,10 @@ close $inputfile; command_fails_like( [ - 'pg_dumpall', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt" + 'pg_dumpall', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt" ], qr/pg_dumpall: error: invalid format in filter/, "invalid syntax: exclusion of unsupported object type"); @@ -532,8 +600,11 @@ command_fails_like( command_ok( [ - 'pg_dump', '-p', $port, '-f', "$tempdir/filter_test.dump", - "-Fc", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => "$tempdir/filter_test.dump", + '--format' => 'custom', + 'postgres' ], "dump all tables"); @@ -544,9 +615,12 @@ close $inputfile; command_ok( [ - 'pg_restore', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", - "-Fc", "$tempdir/filter_test.dump" + 'pg_restore', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + '--format' => 'custom', + "$tempdir/filter_test.dump" ], "restore tables with filter"); @@ -563,8 +637,10 @@ close $inputfile; command_fails_like( [ - 'pg_restore', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt" + 'pg_restore', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt" ], qr/include filter for "table data" is not allowed/, "invalid syntax: inclusion of unallowed object"); @@ -576,8 +652,10 @@ close $inputfile; command_fails_like( [ - 'pg_restore', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt" + 
'pg_restore', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt" ], qr/include filter for "extension" is not allowed/, "invalid syntax: inclusion of unallowed object"); @@ -589,8 +667,10 @@ close $inputfile; command_fails_like( [ - 'pg_restore', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt" + 'pg_restore', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt" ], qr/exclude filter for "extension" is not allowed/, "invalid syntax: exclusion of unallowed object"); @@ -602,8 +682,10 @@ close $inputfile; command_fails_like( [ - 'pg_restore', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt" + 'pg_restore', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt" ], qr/exclude filter for "table data" is not allowed/, "invalid syntax: exclusion of unallowed object"); @@ -613,8 +695,11 @@ command_fails_like( command_ok( [ - 'pg_dump', '-p', $port, '-f', "$tempdir/filter_test.dump", - "-Fc", 'sourcedb' + 'pg_dump', + '--port' => $port, + '--file' => "$tempdir/filter_test.dump", + '--format' => 'custom', + 'sourcedb' ], "dump all objects from sourcedb"); @@ -625,9 +710,12 @@ close $inputfile; command_ok( [ - 'pg_restore', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", - "-Fc", "$tempdir/filter_test.dump" + 'pg_restore', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + '--format' => 'custom', + "$tempdir/filter_test.dump" ], "restore function with filter"); @@ -646,9 +734,12 @@ close $inputfile; command_ok( [ - 'pg_restore', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", - "-Fc", "$tempdir/filter_test.dump" + 'pg_restore', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + '--format' => 'custom', + "$tempdir/filter_test.dump" ], "restore function with filter"); @@ -667,9 +758,12 @@ close $inputfile; command_ok( [ - 
'pg_restore', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", - "-Fc", "$tempdir/filter_test.dump" + 'pg_restore', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + '--format' => 'custom', + "$tempdir/filter_test.dump" ], "restore function with filter"); @@ -687,9 +781,12 @@ close $inputfile; command_ok( [ - 'pg_restore', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", - "-Fc", "$tempdir/filter_test.dump" + 'pg_restore', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + '--format' => 'custom', + "$tempdir/filter_test.dump" ], "restore function with filter"); @@ -707,9 +804,12 @@ close $inputfile; command_ok( [ - 'pg_restore', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", - "-Fc", "$tempdir/filter_test.dump" + 'pg_restore', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + '--format' => 'custom', + "$tempdir/filter_test.dump" ], "restore function with filter"); @@ -733,8 +833,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "filter file without patterns"); @@ -750,8 +853,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "filter file without patterns"); @@ -768,8 +874,11 @@ close $inputfile; command_ok( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], "filter file without patterns"); @@ -788,8 +897,11 @@ close $inputfile; 
command_fails_like( [ - 'pg_dump', '-p', $port, '-f', $plainfile, - "--filter=$tempdir/inputfile.txt", 'postgres' + 'pg_dump', + '--port' => $port, + '--file' => $plainfile, + '--filter' => "$tempdir/inputfile.txt", + 'postgres' ], qr/pg_dump: error: no matching extensions were found/, "dump nonexisting extension"); diff --git a/src/bin/pg_dump/t/010_dump_connstr.pl b/src/bin/pg_dump/t/010_dump_connstr.pl index 435f7ab694c..bde6096c60d 100644 --- a/src/bin/pg_dump/t/010_dump_connstr.pl +++ b/src/bin/pg_dump/t/010_dump_connstr.pl @@ -51,16 +51,20 @@ my $src_bootstrap_super = 'regress_postgres'; my $dst_bootstrap_super = 'boot'; my $node = PostgreSQL::Test::Cluster->new('main'); -$node->init(extra => - [ '-U', $src_bootstrap_super, '--locale=C', '--encoding=LATIN1' ]); +$node->init( + extra => [ + '--username' => $src_bootstrap_super, + '--locale' => 'C', + '--encoding' => 'LATIN1', + ]); # prep pg_hba.conf and pg_ident.conf $node->run_log( [ - $ENV{PG_REGRESS}, '--config-auth', - $node->data_dir, '--user', - $src_bootstrap_super, '--create-role', - "$username1,$username2,$username3,$username4" + $ENV{PG_REGRESS}, + '--config-auth' => $node->data_dir, + '--user' => $src_bootstrap_super, + '--create-role' => "$username1,$username2,$username3,$username4", ]); $node->start; @@ -69,106 +73,158 @@ my $discard = "$backupdir/discard.sql"; my $plain = "$backupdir/plain.sql"; my $dirfmt = "$backupdir/dirfmt"; -$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname1 ]); $node->run_log( - [ 'createuser', '-U', $src_bootstrap_super, '-s', $username1 ]); -$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname2 ]); + [ 'createdb', '--username' => $src_bootstrap_super, $dbname1 ]); $node->run_log( - [ 'createuser', '-U', $src_bootstrap_super, '-s', $username2 ]); -$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname3 ]); + [ + 'createuser', + '--username' => $src_bootstrap_super, + '--superuser', + $username1, + ]); $node->run_log( - [ 'createuser', 
'-U', $src_bootstrap_super, '-s', $username3 ]); -$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname4 ]); + [ 'createdb', '--username' => $src_bootstrap_super, $dbname2 ]); $node->run_log( - [ 'createuser', '-U', $src_bootstrap_super, '-s', $username4 ]); + [ + 'createuser', + '--username' => $src_bootstrap_super, + '--superuser', + $username2, + ]); +$node->run_log( + [ 'createdb', '--username' => $src_bootstrap_super, $dbname3 ]); +$node->run_log( + [ + 'createuser', + '--username' => $src_bootstrap_super, + '--superuser', + $username3, + ]); +$node->run_log( + [ 'createdb', '--username' => $src_bootstrap_super, $dbname4 ]); +$node->run_log( + [ + 'createuser', + '--username' => $src_bootstrap_super, + '--superuser', + $username4, + ]); -# For these tests, pg_dumpall -r is used because it produces a short -# dump. +# For these tests, pg_dumpall --roles-only is used because it produces +# a short dump. $node->command_ok( [ - 'pg_dumpall', '-r', '-f', $discard, '--dbname', - $node->connstr($dbname1), - '-U', $username4 + 'pg_dumpall', '--roles-only', + '--file' => $discard, + '--dbname' => $node->connstr($dbname1), + '--username' => $username4, ], 'pg_dumpall with long ASCII name 1'); $node->command_ok( [ - 'pg_dumpall', '--no-sync', '-r', '-f', $discard, '--dbname', - $node->connstr($dbname2), - '-U', $username3 + 'pg_dumpall', '--no-sync', '--roles-only', + '--file' => $discard, + '--dbname' => $node->connstr($dbname2), + '--username' => $username3, ], 'pg_dumpall with long ASCII name 2'); $node->command_ok( [ - 'pg_dumpall', '--no-sync', '-r', '-f', $discard, '--dbname', - $node->connstr($dbname3), - '-U', $username2 + 'pg_dumpall', '--no-sync', '--roles-only', + '--file' => $discard, + '--dbname' => $node->connstr($dbname3), + '--username' => $username2, ], 'pg_dumpall with long ASCII name 3'); $node->command_ok( [ - 'pg_dumpall', '--no-sync', '-r', '-f', $discard, '--dbname', - $node->connstr($dbname4), - '-U', $username1 + 'pg_dumpall', 
'--no-sync', '--roles-only', + '--file' => $discard, + '--dbname' => $node->connstr($dbname4), + '--username' => $username1, ], 'pg_dumpall with long ASCII name 4'); $node->command_ok( [ - 'pg_dumpall', '-U', - $src_bootstrap_super, '--no-sync', - '-r', '-l', - 'dbname=template1' + 'pg_dumpall', '--no-sync', '--roles-only', + '--username' => $src_bootstrap_super, + '--dbname' => 'dbname=template1', ], - 'pg_dumpall -l accepts connection string'); + 'pg_dumpall --dbname accepts connection string'); -$node->run_log([ 'createdb', '-U', $src_bootstrap_super, "foo\n\rbar" ]); +$node->run_log( + [ 'createdb', '--username' => $src_bootstrap_super, "foo\n\rbar" ]); -# not sufficient to use -r here +# not sufficient to use --roles-only here $node->command_fails( - [ 'pg_dumpall', '-U', $src_bootstrap_super, '--no-sync', '-f', $discard ], + [ + 'pg_dumpall', '--no-sync', + '--username' => $src_bootstrap_super, + '--file' => $discard, + ], 'pg_dumpall with \n\r in database name'); -$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, "foo\n\rbar" ]); +$node->run_log( + [ 'dropdb', '--username' => $src_bootstrap_super, "foo\n\rbar" ]); # make a table, so the parallel worker has something to dump $node->safe_psql( $dbname1, 'CREATE TABLE t0()', - extra_params => [ '-U', $src_bootstrap_super ]); + extra_params => [ '--username' => $src_bootstrap_super ]); # XXX no printed message when this fails, just SIGPIPE termination $node->command_ok( [ - 'pg_dump', '-Fd', '--no-sync', '-j2', '-f', $dirfmt, '-U', $username1, - $node->connstr($dbname1) + 'pg_dump', + '--format' => 'directory', + '--no-sync', + '--jobs' => 2, + '--file' => $dirfmt, + '--username' => $username1, + $node->connstr($dbname1), ], 'parallel dump'); # recreate $dbname1 for restore test -$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]); -$node->run_log([ 'createdb', '-U', $src_bootstrap_super, $dbname1 ]); +$node->run_log([ 'dropdb', '--username' => $src_bootstrap_super, $dbname1 ]); 
+$node->run_log( + [ 'createdb', '--username' => $src_bootstrap_super, $dbname1 ]); $node->command_ok( [ - 'pg_restore', '-v', '-d', 'template1', - '-j2', '-U', $username1, $dirfmt + 'pg_restore', + '--verbose', + '--dbname' => 'template1', + '--jobs' => 2, + '--username' => $username1, + $dirfmt, ], 'parallel restore'); -$node->run_log([ 'dropdb', '-U', $src_bootstrap_super, $dbname1 ]); +$node->run_log([ 'dropdb', '--username' => $src_bootstrap_super, $dbname1 ]); $node->command_ok( [ - 'pg_restore', '-C', '-v', '-d', - 'template1', '-j2', '-U', $username1, - $dirfmt + 'pg_restore', + '--create', + '--verbose', + '--dbname' => 'template1', + '--jobs' => 2, + '--username' => $username1, + $dirfmt, ], 'parallel restore with create'); $node->command_ok( - [ 'pg_dumpall', '--no-sync', '-f', $plain, '-U', $username1 ], + [ + 'pg_dumpall', + '--no-sync', + '--file' => $plain, + '--username' => $username1, + ], 'take full dump'); system_log('cat', $plain); my ($stderr, $result); @@ -183,20 +239,29 @@ $restore_super =~ s/"//g my $envar_node = PostgreSQL::Test::Cluster->new('destination_envar'); $envar_node->init( - extra => - [ '-U', $dst_bootstrap_super, '--locale=C', '--encoding=LATIN1' ], + extra => [ + '--username' => $dst_bootstrap_super, + '--locale' => 'C', + '--encoding' => 'LATIN1', + ], auth_extra => - [ '--user', $dst_bootstrap_super, '--create-role', $restore_super ]); + [ '--user' => $dst_bootstrap_super, '--create-role' => $restore_super ], +); $envar_node->start; # make superuser for restore $envar_node->run_log( - [ 'createuser', '-U', $dst_bootstrap_super, '-s', $restore_super ]); + [ + 'createuser', + '--username' => $dst_bootstrap_super, + '--superuser', $restore_super, + ]); { local $ENV{PGPORT} = $envar_node->port; local $ENV{PGUSER} = $restore_super; - $result = run_log([ 'psql', '-X', '-f', $plain ], '2>', \$stderr); + $result = run_log([ 'psql', '--no-psqlrc', '--file' => $plain ], + '2>' => \$stderr); } ok($result, 'restore full dump using 
environment variables for connection parameters' @@ -210,21 +275,32 @@ is($stderr, '', 'no dump errors'); my $cmdline_node = PostgreSQL::Test::Cluster->new('destination_cmdline'); $cmdline_node->init( - extra => - [ '-U', $dst_bootstrap_super, '--locale=C', '--encoding=LATIN1' ], + extra => [ + '--username' => $dst_bootstrap_super, + '--locale' => 'C', + '--encoding' => 'LATIN1', + ], auth_extra => - [ '--user', $dst_bootstrap_super, '--create-role', $restore_super ]); + [ '--user' => $dst_bootstrap_super, '--create-role' => $restore_super ], +); $cmdline_node->start; $cmdline_node->run_log( - [ 'createuser', '-U', $dst_bootstrap_super, '-s', $restore_super ]); + [ + 'createuser', + '--username' => $dst_bootstrap_super, + '--superuser', + $restore_super, + ]); { $result = run_log( [ - 'psql', '-p', $cmdline_node->port, '-U', - $restore_super, '-X', '-f', $plain + 'psql', + '--port' => $cmdline_node->port, + '--username' => $restore_super, + '--no-psqlrc', + '--file' => $plain, ], - '2>', - \$stderr); + '2>' => \$stderr); } ok($result, 'restore full dump with command-line options for connection parameters'); diff --git a/src/bin/pg_resetwal/t/001_basic.pl b/src/bin/pg_resetwal/t/001_basic.pl index d0bd1f7ace8..323cd483cf3 100644 --- a/src/bin/pg_resetwal/t/001_basic.pl +++ b/src/bin/pg_resetwal/t/001_basic.pl @@ -30,7 +30,8 @@ SKIP: 'check PGDATA permissions'); } -command_ok([ 'pg_resetwal', '-D', $node->data_dir ], 'pg_resetwal runs'); +command_ok([ 'pg_resetwal', '--pgdata' => $node->data_dir ], + 'pg_resetwal runs'); $node->start; is($node->safe_psql("postgres", "SELECT 1;"), 1, 'server running and working after reset'); @@ -46,7 +47,7 @@ command_fails_like( qr/database server was not shut down cleanly/, 'does not run after immediate shutdown'); command_ok( - [ 'pg_resetwal', '-f', $node->data_dir ], + [ 'pg_resetwal', '--force', $node->data_dir ], 'runs after immediate shutdown with force'); $node->start; is($node->safe_psql("postgres", "SELECT 1;"), @@ -80,111 
+81,111 @@ command_fails_like( # error cases # -c command_fails_like( - [ 'pg_resetwal', '-c', 'foo', $node->data_dir ], + [ 'pg_resetwal', '-c' => 'foo', $node->data_dir ], qr/error: invalid argument for option -c/, 'fails with incorrect -c option'); command_fails_like( - [ 'pg_resetwal', '-c', '10,bar', $node->data_dir ], + [ 'pg_resetwal', '-c' => '10,bar', $node->data_dir ], qr/error: invalid argument for option -c/, 'fails with incorrect -c option part 2'); command_fails_like( - [ 'pg_resetwal', '-c', '1,10', $node->data_dir ], + [ 'pg_resetwal', '-c' => '1,10', $node->data_dir ], qr/greater than/, - 'fails with -c value 1 part 1'); + 'fails with -c ids value 1 part 1'); command_fails_like( - [ 'pg_resetwal', '-c', '10,1', $node->data_dir ], + [ 'pg_resetwal', '-c' => '10,1', $node->data_dir ], qr/greater than/, 'fails with -c value 1 part 2'); # -e command_fails_like( - [ 'pg_resetwal', '-e', 'foo', $node->data_dir ], + [ 'pg_resetwal', '-e' => 'foo', $node->data_dir ], qr/error: invalid argument for option -e/, 'fails with incorrect -e option'); command_fails_like( - [ 'pg_resetwal', '-e', '-1', $node->data_dir ], + [ 'pg_resetwal', '-e' => '-1', $node->data_dir ], qr/must not be -1/, 'fails with -e value -1'); # -l command_fails_like( - [ 'pg_resetwal', '-l', 'foo', $node->data_dir ], + [ 'pg_resetwal', '-l' => 'foo', $node->data_dir ], qr/error: invalid argument for option -l/, 'fails with incorrect -l option'); # -m command_fails_like( - [ 'pg_resetwal', '-m', 'foo', $node->data_dir ], + [ 'pg_resetwal', '-m' => 'foo', $node->data_dir ], qr/error: invalid argument for option -m/, 'fails with incorrect -m option'); command_fails_like( - [ 'pg_resetwal', '-m', '10,bar', $node->data_dir ], + [ 'pg_resetwal', '-m' => '10,bar', $node->data_dir ], qr/error: invalid argument for option -m/, 'fails with incorrect -m option part 2'); command_fails_like( - [ 'pg_resetwal', '-m', '0,10', $node->data_dir ], + [ 'pg_resetwal', '-m' => '0,10', $node->data_dir ], 
qr/must not be 0/, 'fails with -m value 0 part 1'); command_fails_like( - [ 'pg_resetwal', '-m', '10,0', $node->data_dir ], + [ 'pg_resetwal', '-m' => '10,0', $node->data_dir ], qr/must not be 0/, 'fails with -m value 0 part 2'); # -o command_fails_like( - [ 'pg_resetwal', '-o', 'foo', $node->data_dir ], + [ 'pg_resetwal', '-o' => 'foo', $node->data_dir ], qr/error: invalid argument for option -o/, 'fails with incorrect -o option'); command_fails_like( - [ 'pg_resetwal', '-o', '0', $node->data_dir ], + [ 'pg_resetwal', '-o' => '0', $node->data_dir ], qr/must not be 0/, 'fails with -o value 0'); # -O command_fails_like( - [ 'pg_resetwal', '-O', 'foo', $node->data_dir ], + [ 'pg_resetwal', '-O' => 'foo', $node->data_dir ], qr/error: invalid argument for option -O/, 'fails with incorrect -O option'); command_fails_like( - [ 'pg_resetwal', '-O', '-1', $node->data_dir ], + [ 'pg_resetwal', '-O' => '-1', $node->data_dir ], qr/must not be -1/, 'fails with -O value -1'); # --wal-segsize command_fails_like( - [ 'pg_resetwal', '--wal-segsize', 'foo', $node->data_dir ], + [ 'pg_resetwal', '--wal-segsize' => 'foo', $node->data_dir ], qr/error: invalid value/, 'fails with incorrect --wal-segsize option'); command_fails_like( - [ 'pg_resetwal', '--wal-segsize', '13', $node->data_dir ], + [ 'pg_resetwal', '--wal-segsize' => '13', $node->data_dir ], qr/must be a power/, 'fails with invalid --wal-segsize value'); # -u command_fails_like( - [ 'pg_resetwal', '-u', 'foo', $node->data_dir ], + [ 'pg_resetwal', '-u' => 'foo', $node->data_dir ], qr/error: invalid argument for option -u/, 'fails with incorrect -u option'); command_fails_like( - [ 'pg_resetwal', '-u', '1', $node->data_dir ], + [ 'pg_resetwal', '-u' => '1', $node->data_dir ], qr/must be greater than/, 'fails with -u value too small'); # -x command_fails_like( - [ 'pg_resetwal', '-x', 'foo', $node->data_dir ], + [ 'pg_resetwal', '-x' => 'foo', $node->data_dir ], qr/error: invalid argument for option -x/, 'fails with 
incorrect -x option'); command_fails_like( - [ 'pg_resetwal', '-x', '1', $node->data_dir ], + [ 'pg_resetwal', '-x' => '1', $node->data_dir ], qr/must be greater than/, 'fails with -x value too small'); # run with control override options -my $out = (run_command([ 'pg_resetwal', '-n', $node->data_dir ]))[0]; +my $out = (run_command([ 'pg_resetwal', '--dry-run', $node->data_dir ]))[0]; $out =~ /^Database block size: *(\d+)$/m or die; my $blcksz = $1; -my @cmd = ('pg_resetwal', '-D', $node->data_dir); +my @cmd = ('pg_resetwal', '--pgdata' => $node->data_dir); # some not-so-critical hardcoded values -push @cmd, '-e', 1; -push @cmd, '-l', '00000001000000320000004B'; -push @cmd, '-o', 100_000; -push @cmd, '--wal-segsize', 1; +push @cmd, '--epoch' => 1; +push @cmd, '--next-wal-file' => '00000001000000320000004B'; +push @cmd, '--next-oid' => 100_000; +push @cmd, '--wal-segsize' => 1; # these use the guidance from the documentation @@ -202,31 +203,33 @@ my (@files, $mult); # XXX: Should there be a multiplier, similar to the other options? # -c argument is "old,new" push @cmd, - '-c', + '--commit-timestamp-ids' => sprintf("%d,%d", hex($files[0]) == 0 ? 3 : hex($files[0]), hex($files[-1])); @files = get_slru_files('pg_multixact/offsets'); $mult = 32 * $blcksz / 4; -# -m argument is "new,old" -push @cmd, '-m', - sprintf("%d,%d", +# --multixact-ids argument is "new,old" +push @cmd, + '--multixact-ids' => sprintf("%d,%d", (hex($files[-1]) + 1) * $mult, hex($files[0]) == 0 ? 1 : hex($files[0] * $mult)); @files = get_slru_files('pg_multixact/members'); $mult = 32 * int($blcksz / 20) * 4; -push @cmd, '-O', (hex($files[-1]) + 1) * $mult; +push @cmd, '--multixact-offset' => (hex($files[-1]) + 1) * $mult; @files = get_slru_files('pg_xact'); $mult = 32 * $blcksz * 4; push @cmd, - '-u', (hex($files[0]) == 0 ? 3 : hex($files[0]) * $mult), - '-x', ((hex($files[-1]) + 1) * $mult); + '--oldest-transaction-id' => + (hex($files[0]) == 0 ? 
3 : hex($files[0]) * $mult), + '--next-transaction-id' => ((hex($files[-1]) + 1) * $mult); -command_ok([ @cmd, '-n' ], 'runs with control override options, dry run'); +command_ok([ @cmd, '--dry-run' ], + 'runs with control override options, dry run'); command_ok(\@cmd, 'runs with control override options'); command_like( - [ 'pg_resetwal', '-n', $node->data_dir ], + [ 'pg_resetwal', '--dry-run', $node->data_dir ], qr/^Latest checkpoint's NextOID: *100000$/m, 'spot check that control changes were applied'); diff --git a/src/bin/pg_resetwal/t/002_corrupted.pl b/src/bin/pg_resetwal/t/002_corrupted.pl index 02e3febcd5f..869d5d8d2a6 100644 --- a/src/bin/pg_resetwal/t/002_corrupted.pl +++ b/src/bin/pg_resetwal/t/002_corrupted.pl @@ -31,7 +31,7 @@ print $fh pack("x[$size]"); close $fh; command_checks_all( - [ 'pg_resetwal', '-n', $node->data_dir ], + [ 'pg_resetwal', '--dry-run', $node->data_dir ], 0, [qr/pg_control version number/], [ @@ -47,7 +47,7 @@ print $fh $data, pack("x[" . ($size - 16) . 
"]"); close $fh; command_checks_all( - [ 'pg_resetwal', '-n', $node->data_dir ], + [ 'pg_resetwal', '--dry-run', $node->data_dir ], 0, [qr/pg_control version number/], [ diff --git a/src/bin/pg_rewind/t/001_basic.pl b/src/bin/pg_rewind/t/001_basic.pl index 7e971ded971..031060db842 100644 --- a/src/bin/pg_rewind/t/001_basic.pl +++ b/src/bin/pg_rewind/t/001_basic.pl @@ -106,8 +106,8 @@ sub run_test command_fails( [ 'pg_rewind', '--debug', - '--source-pgdata', $standby_pgdata, - '--target-pgdata', $primary_pgdata, + '--source-pgdata' => $standby_pgdata, + '--target-pgdata' => $primary_pgdata, '--no-sync' ], 'pg_rewind with running target'); @@ -118,8 +118,8 @@ sub run_test command_fails( [ 'pg_rewind', '--debug', - '--source-pgdata', $standby_pgdata, - '--target-pgdata', $primary_pgdata, + '--source-pgdata' => $standby_pgdata, + '--target-pgdata' => $primary_pgdata, '--no-sync', '--no-ensure-shutdown' ], 'pg_rewind --no-ensure-shutdown with running target'); @@ -131,8 +131,8 @@ sub run_test command_fails( [ 'pg_rewind', '--debug', - '--source-pgdata', $standby_pgdata, - '--target-pgdata', $primary_pgdata, + '--source-pgdata' => $standby_pgdata, + '--target-pgdata' => $primary_pgdata, '--no-sync', '--no-ensure-shutdown' ], 'pg_rewind with unexpected running source'); @@ -145,8 +145,8 @@ sub run_test command_ok( [ 'pg_rewind', '--debug', - '--source-pgdata', $standby_pgdata, - '--target-pgdata', $primary_pgdata, + '--source-pgdata' => $standby_pgdata, + '--target-pgdata' => $primary_pgdata, '--no-sync', '--dry-run' ], 'pg_rewind --dry-run'); diff --git a/src/bin/pg_rewind/t/006_options.pl b/src/bin/pg_rewind/t/006_options.pl index a01e47a4e39..b7a84ac4d7a 100644 --- a/src/bin/pg_rewind/t/006_options.pl +++ b/src/bin/pg_rewind/t/006_options.pl @@ -17,27 +17,30 @@ my $primary_pgdata = PostgreSQL::Test::Utils::tempdir; my $standby_pgdata = PostgreSQL::Test::Utils::tempdir; command_fails( [ - 'pg_rewind', '--debug', - '--target-pgdata', $primary_pgdata, - '--source-pgdata', 
$standby_pgdata, + 'pg_rewind', + '--debug', + '--target-pgdata' => $primary_pgdata, + '--source-pgdata' => $standby_pgdata, 'extra_arg1' ], 'too many arguments'); -command_fails([ 'pg_rewind', '--target-pgdata', $primary_pgdata ], +command_fails([ 'pg_rewind', '--target-pgdata' => $primary_pgdata ], 'no source specified'); command_fails( [ - 'pg_rewind', '--debug', - '--target-pgdata', $primary_pgdata, - '--source-pgdata', $standby_pgdata, - '--source-server', 'incorrect_source' + 'pg_rewind', + '--debug', + '--target-pgdata' => $primary_pgdata, + '--source-pgdata' => $standby_pgdata, + '--source-server' => 'incorrect_source' ], 'both remote and local sources specified'); command_fails( [ - 'pg_rewind', '--debug', - '--target-pgdata', $primary_pgdata, - '--source-pgdata', $standby_pgdata, + 'pg_rewind', + '--debug', + '--target-pgdata' => $primary_pgdata, + '--source-pgdata' => $standby_pgdata, '--write-recovery-conf' ], 'no local source with --write-recovery-conf'); diff --git a/src/bin/pg_rewind/t/007_standby_source.pl b/src/bin/pg_rewind/t/007_standby_source.pl index 8468856e68c..583e551a3e8 100644 --- a/src/bin/pg_rewind/t/007_standby_source.pl +++ b/src/bin/pg_rewind/t/007_standby_source.pl @@ -124,10 +124,12 @@ copy( # recovery configuration automatically. 
command_ok( [ - 'pg_rewind', "--debug", - "--source-server", $node_b->connstr('postgres'), - "--target-pgdata=$node_c_pgdata", "--no-sync", - "--write-recovery-conf" + 'pg_rewind', + '--debug', + '--source-server' => $node_b->connstr('postgres'), + '--target-pgdata' => $node_c_pgdata, + '--no-sync', + '--write-recovery-conf', ], 'pg_rewind remote'); } diff --git a/src/bin/pg_rewind/t/008_min_recovery_point.pl b/src/bin/pg_rewind/t/008_min_recovery_point.pl index 2f64b655ee7..28496afe350 100644 --- a/src/bin/pg_rewind/t/008_min_recovery_point.pl +++ b/src/bin/pg_rewind/t/008_min_recovery_point.pl @@ -142,8 +142,10 @@ copy( command_ok( [ - 'pg_rewind', "--source-server=$node_1_connstr", - "--target-pgdata=$node_2_pgdata", "--debug" + 'pg_rewind', + '--source-server' => $node_1_connstr, + '--target-pgdata' => $node_2_pgdata, + '--debug', ], 'run pg_rewind'); diff --git a/src/bin/pg_rewind/t/009_growing_files.pl b/src/bin/pg_rewind/t/009_growing_files.pl index 7552c9e3e1f..643d200dcc9 100644 --- a/src/bin/pg_rewind/t/009_growing_files.pl +++ b/src/bin/pg_rewind/t/009_growing_files.pl @@ -52,8 +52,8 @@ append_to_file "$standby_pgdata/tst_both_dir/file1", 'a'; my $ret = run_log( [ 'pg_rewind', '--debug', - '--source-pgdata', $standby_pgdata, - '--target-pgdata', $primary_pgdata, + '--source-pgdata' => $standby_pgdata, + '--target-pgdata' => $primary_pgdata, '--no-sync', ], '2>>', diff --git a/src/bin/pg_rewind/t/010_keep_recycled_wals.pl b/src/bin/pg_rewind/t/010_keep_recycled_wals.pl index 4f962b728a1..55bf046d9c1 100644 --- a/src/bin/pg_rewind/t/010_keep_recycled_wals.pl +++ b/src/bin/pg_rewind/t/010_keep_recycled_wals.pl @@ -49,8 +49,8 @@ $node_primary->stop(); my ($stdout, $stderr) = run_command( [ 'pg_rewind', '--debug', - '--source-pgdata', $node_standby->data_dir, - '--target-pgdata', $node_primary->data_dir, + '--source-pgdata' => $node_standby->data_dir, + '--target-pgdata' => $node_primary->data_dir, '--no-sync', ]); diff --git 
a/src/bin/pg_rewind/t/RewindTest.pm b/src/bin/pg_rewind/t/RewindTest.pm index 45e0d8d390a..6115ec21eb9 100644 --- a/src/bin/pg_rewind/t/RewindTest.pm +++ b/src/bin/pg_rewind/t/RewindTest.pm @@ -255,12 +255,11 @@ sub run_pg_rewind command_ok( [ 'pg_rewind', - "--debug", - "--source-pgdata=$standby_pgdata", - "--target-pgdata=$primary_pgdata", - "--no-sync", - "--config-file", - "$tmp_folder/primary-postgresql.conf.tmp" + '--debug', + '--source-pgdata' => $standby_pgdata, + '--target-pgdata' => $primary_pgdata, + '--no-sync', + '--config-file' => "$tmp_folder/primary-postgresql.conf.tmp", ], 'pg_rewind local'); } @@ -270,11 +269,13 @@ sub run_pg_rewind # recovery configuration automatically. command_ok( [ - 'pg_rewind', "--debug", - "--source-server", $standby_connstr, - "--target-pgdata=$primary_pgdata", "--no-sync", - "--write-recovery-conf", "--config-file", - "$tmp_folder/primary-postgresql.conf.tmp" + 'pg_rewind', + '--debug', + '--source-server' => $standby_connstr, + '--target-pgdata' => $primary_pgdata, + '--no-sync', + '--write-recovery-conf', + '--config-file' => "$tmp_folder/primary-postgresql.conf.tmp", ], 'pg_rewind remote'); @@ -327,14 +328,13 @@ sub run_pg_rewind command_ok( [ 'pg_rewind', - "--debug", - "--source-pgdata=$standby_pgdata", - "--target-pgdata=$primary_pgdata", - "--no-sync", - "--no-ensure-shutdown", - "--restore-target-wal", - "--config-file", - "$primary_pgdata/postgresql.conf" + '--debug', + '--source-pgdata' => $standby_pgdata, + '--target-pgdata' => $primary_pgdata, + '--no-sync', + '--no-ensure-shutdown', + '--restore-target-wal', + '--config-file' => "$primary_pgdata/postgresql.conf", ], 'pg_rewind archive'); } diff --git a/src/bin/pg_test_fsync/t/001_basic.pl b/src/bin/pg_test_fsync/t/001_basic.pl index b275d838215..7eed51233c4 100644 --- a/src/bin/pg_test_fsync/t/001_basic.pl +++ b/src/bin/pg_test_fsync/t/001_basic.pl @@ -18,11 +18,11 @@ program_options_handling_ok('pg_test_fsync'); # Test invalid option combinations 
command_fails_like( - [ 'pg_test_fsync', '--secs-per-test', 'a' ], + [ 'pg_test_fsync', '--secs-per-test' => 'a' ], qr/\Qpg_test_fsync: error: invalid argument for option --secs-per-test\E/, 'pg_test_fsync: invalid argument for option --secs-per-test'); command_fails_like( - [ 'pg_test_fsync', '--secs-per-test', '0' ], + [ 'pg_test_fsync', '--secs-per-test' => '0' ], qr/\Qpg_test_fsync: error: --secs-per-test must be in range 1..4294967295\E/, 'pg_test_fsync: --secs-per-test must be in range'); diff --git a/src/bin/pg_test_timing/t/001_basic.pl b/src/bin/pg_test_timing/t/001_basic.pl index d59def3f873..6554cd981af 100644 --- a/src/bin/pg_test_timing/t/001_basic.pl +++ b/src/bin/pg_test_timing/t/001_basic.pl @@ -18,11 +18,11 @@ program_options_handling_ok('pg_test_timing'); # Test invalid option combinations command_fails_like( - [ 'pg_test_timing', '--duration', 'a' ], + [ 'pg_test_timing', '--duration' => 'a' ], qr/\Qpg_test_timing: invalid argument for option --duration\E/, 'pg_test_timing: invalid argument for option --duration'); command_fails_like( - [ 'pg_test_timing', '--duration', '0' ], + [ 'pg_test_timing', '--duration' => '0' ], qr/\Qpg_test_timing: --duration must be in range 1..4294967295\E/, 'pg_test_timing: --duration must be in range'); diff --git a/src/bin/pg_upgrade/t/004_subscription.pl b/src/bin/pg_upgrade/t/004_subscription.pl index 661a2715c6f..13773316e1d 100644 --- a/src/bin/pg_upgrade/t/004_subscription.pl +++ b/src/bin/pg_upgrade/t/004_subscription.pl @@ -58,11 +58,17 @@ $new_sub->append_conf('postgresql.conf', "max_replication_slots = 0"); # max_replication_slots. 
command_checks_all( [ - 'pg_upgrade', '--no-sync', '-d', $old_sub->data_dir, - '-D', $new_sub->data_dir, '-b', $oldbindir, - '-B', $newbindir, '-s', $new_sub->host, - '-p', $old_sub->port, '-P', $new_sub->port, - $mode, '--check', + 'pg_upgrade', + '--no-sync', + '--old-datadir' => $old_sub->data_dir, + '--new-datadir' => $new_sub->data_dir, + '--old-bindir' => $oldbindir, + '--new-bindir' => $newbindir, + '--socketdir' => $new_sub->host, + '--old-port' => $old_sub->port, + '--new-port' => $new_sub->port, + $mode, + '--check', ], 1, [ @@ -126,11 +132,17 @@ $old_sub->stop; command_fails( [ - 'pg_upgrade', '--no-sync', '-d', $old_sub->data_dir, - '-D', $new_sub->data_dir, '-b', $oldbindir, - '-B', $newbindir, '-s', $new_sub->host, - '-p', $old_sub->port, '-P', $new_sub->port, - $mode, '--check', + 'pg_upgrade', + '--no-sync', + '--old-datadir' => $old_sub->data_dir, + '--new-datadir' => $new_sub->data_dir, + '--old-bindir' => $oldbindir, + '--new-bindir' => $newbindir, + '--socketdir' => $new_sub->host, + '--old-port' => $old_sub->port, + '--new-port' => $new_sub->port, + $mode, + '--check', ], 'run of pg_upgrade --check for old instance with relation in \'d\' datasync(invalid) state and missing replication origin' ); @@ -254,10 +266,15 @@ $new_sub->append_conf('postgresql.conf', # ------------------------------------------------------ command_ok( [ - 'pg_upgrade', '--no-sync', '-d', $old_sub->data_dir, - '-D', $new_sub->data_dir, '-b', $oldbindir, - '-B', $newbindir, '-s', $new_sub->host, - '-p', $old_sub->port, '-P', $new_sub->port, + 'pg_upgrade', + '--no-sync', + '--old-datadir' => $old_sub->data_dir, + '--new-datadir' => $new_sub->data_dir, + '--old-bindir' => $oldbindir, + '--new-bindir' => $newbindir, + '--socketdir' => $new_sub->host, + '--old-port' => $old_sub->port, + '--new-port' => $new_sub->port, $mode ], 'run of pg_upgrade for old instance when the subscription tables are in init/ready state' diff --git a/src/bin/pg_verifybackup/t/001_basic.pl 
b/src/bin/pg_verifybackup/t/001_basic.pl index 5ff7f26f0d6..ded3011df5f 100644 --- a/src/bin/pg_verifybackup/t/001_basic.pl +++ b/src/bin/pg_verifybackup/t/001_basic.pl @@ -31,7 +31,11 @@ close($fh); # but then try to use an alternate, nonexisting manifest command_fails_like( - [ 'pg_verifybackup', '-m', "$tempdir/not_the_manifest", $tempdir ], + [ + 'pg_verifybackup', + '--manifest-path' => "$tempdir/not_the_manifest", + $tempdir, + ], qr/could not open file.*\/not_the_manifest\"/, 'pg_verifybackup respects -m flag'); diff --git a/src/bin/pg_verifybackup/t/003_corruption.pl b/src/bin/pg_verifybackup/t/003_corruption.pl index a2aa767cff6..8ef7f8a4e7a 100644 --- a/src/bin/pg_verifybackup/t/003_corruption.pl +++ b/src/bin/pg_verifybackup/t/003_corruption.pl @@ -125,8 +125,12 @@ for my $scenario (@scenario) local $ENV{MSYS2_ARG_CONV_EXCL} = $source_ts_prefix; $primary->command_ok( [ - 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast', - '-T', "${source_ts_path}=${backup_ts_path}" + 'pg_basebackup', + '--pgdata' => $backup_path, + '--no-sync', + '--checkpoint' => 'fast', + '--tablespace-mapping' => + "${source_ts_path}=${backup_ts_path}", ], "base backup ok"); command_ok([ 'pg_verifybackup', $backup_path ], diff --git a/src/bin/pg_verifybackup/t/004_options.pl b/src/bin/pg_verifybackup/t/004_options.pl index e6d94b9ad51..52660786680 100644 --- a/src/bin/pg_verifybackup/t/004_options.pl +++ b/src/bin/pg_verifybackup/t/004_options.pl @@ -16,33 +16,45 @@ $primary->init(allows_streaming => 1); $primary->start; my $backup_path = $primary->backup_dir . '/test_options'; $primary->command_ok( - [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $backup_path, + '--no-sync', + '--checkpoint' => 'fast' + ], "base backup ok"); -# Verify that pg_verifybackup -q succeeds and produces no output. +# Verify that pg_verifybackup --quiet succeeds and produces no output. 
my $stdout; my $stderr; -my $result = IPC::Run::run [ 'pg_verifybackup', '-q', $backup_path ], - '>', \$stdout, '2>', \$stderr; -ok($result, "-q succeeds: exit code 0"); -is($stdout, '', "-q succeeds: no stdout"); -is($stderr, '', "-q succeeds: no stderr"); +my $result = IPC::Run::run [ 'pg_verifybackup', '--quiet', $backup_path ], + '>' => \$stdout, + '2>' => \$stderr; +ok($result, "--quiet succeeds: exit code 0"); +is($stdout, '', "--quiet succeeds: no stdout"); +is($stderr, '', "--quiet succeeds: no stderr"); -# Should still work if we specify -Fp. -$primary->command_ok([ 'pg_verifybackup', '-Fp', $backup_path ], - "verifies with -Fp"); +# Should still work if we specify --format=plain. +$primary->command_ok( + [ 'pg_verifybackup', '--format' => 'plain', $backup_path ], + "verifies with --format=plain"); -# Should not work if we specify -Fy because that's invalid. +# Should not work if we specify --format=y because that's invalid. $primary->command_fails_like( - [ 'pg_verifybackup', '-Fy', $backup_path ], + [ 'pg_verifybackup', '--format' => 'y', $backup_path ], qr(invalid backup format "y", must be "plain" or "tar"), - "does not verify with -Fy"); + "does not verify with --format=y"); # Should produce a lengthy list of errors; we test for just one of those. $primary->command_fails_like( - [ 'pg_verifybackup', '-Ft', '-n', $backup_path ], + [ + 'pg_verifybackup', + '--format' => 'tar', + '--no-parse-wal', + $backup_path + ], qr("pg_multixact" is not a plain file), - "does not verify with -Ft -n"); + "does not verify with --format=tar --no-parse-wal"); # Test invalid options command_fails_like( @@ -59,25 +71,30 @@ close($fh); # Verify that pg_verifybackup -q now fails. 
command_fails_like( - [ 'pg_verifybackup', '-q', $backup_path ], + [ 'pg_verifybackup', '--quiet', $backup_path ], qr/checksum mismatch for file \"PG_VERSION\"/, - '-q checksum mismatch'); + '--quiet checksum mismatch'); # Since we didn't change the length of the file, verification should succeed # if we ignore checksums. Check that we get the right message, too. command_like( - [ 'pg_verifybackup', '-s', $backup_path ], + [ 'pg_verifybackup', '--skip-checksums', $backup_path ], qr/backup successfully verified/, - '-s skips checksumming'); + '--skip-checksums skips checksumming'); # Validation should succeed if we ignore the problem file. Also, check # the progress information. command_checks_all( - [ 'pg_verifybackup', '--progress', '-i', 'PG_VERSION', $backup_path ], + [ + 'pg_verifybackup', + '--progress', + '--ignore' => 'PG_VERSION', + $backup_path + ], 0, [qr/backup successfully verified/], [qr{(\d+/\d+ kB \(\d+%\) verified)+}], - '-i ignores problem file'); + '--ignore ignores problem file'); # PG_VERSION is already corrupt; let's try also removing all of pg_xact. rmtree($backup_path . "/pg_xact"); @@ -85,17 +102,22 @@ rmtree($backup_path . "/pg_xact"); # We're ignoring the problem with PG_VERSION, but not the problem with # pg_xact, so verification should fail here. command_fails_like( - [ 'pg_verifybackup', '-i', 'PG_VERSION', $backup_path ], + [ 'pg_verifybackup', '--ignore' => 'PG_VERSION', $backup_path ], qr/pg_xact.*is present in the manifest but not on disk/, - '-i does not ignore all problems'); + '--ignore does not ignore all problems'); -# If we use -i twice, we should be able to ignore all of the problems. +# If we use --ignore twice, we should be able to ignore all of the problems. 
command_like( - [ 'pg_verifybackup', '-i', 'PG_VERSION', '-i', 'pg_xact', $backup_path ], + [ + 'pg_verifybackup', + '--ignore' => 'PG_VERSION', + '--ignore' => 'pg_xact', + $backup_path + ], qr/backup successfully verified/, - 'multiple -i options work'); + 'multiple --ignore options work'); -# Verify that when -i is not used, both problems are reported. +# Verify that when --ignore is not used, both problems are reported. $result = IPC::Run::run [ 'pg_verifybackup', $backup_path ], '>', \$stdout, '2>', \$stderr; ok(!$result, "multiple problems: fails"); @@ -108,24 +130,28 @@ like( qr/checksum mismatch for file \"PG_VERSION\"/, "multiple problems: checksum mismatch reported"); -# Verify that when -e is used, only the problem detected first is reported. -$result = IPC::Run::run [ 'pg_verifybackup', '-e', $backup_path ], - '>', \$stdout, '2>', \$stderr; -ok(!$result, "-e reports 1 error: fails"); +# Verify that when --exit-on-error is used, only the problem detected +# first is reported. +$result = + IPC::Run::run [ 'pg_verifybackup', '--exit-on-error', $backup_path ], + '>' => \$stdout, + '2>' => \$stderr; +ok(!$result, "--exit-on-error reports 1 error: fails"); like( $stderr, qr/pg_xact.*is present in the manifest but not on disk/, - "-e reports 1 error: missing files reported"); + "--exit-on-error reports 1 error: missing files reported"); unlike( $stderr, qr/checksum mismatch for file \"PG_VERSION\"/, - "-e reports 1 error: checksum mismatch not reported"); + "--exit-on-error reports 1 error: checksum mismatch not reported"); # Test valid manifest with nonexistent backup directory. 
command_fails_like( [ - 'pg_verifybackup', '-m', - "$backup_path/backup_manifest", "$backup_path/fake" + 'pg_verifybackup', + '--manifest-path' => "$backup_path/backup_manifest", + "$backup_path/fake" ], qr/could not open directory/, 'nonexistent backup directory'); diff --git a/src/bin/pg_verifybackup/t/006_encoding.pl b/src/bin/pg_verifybackup/t/006_encoding.pl index 90c05e69189..c243153c5f9 100644 --- a/src/bin/pg_verifybackup/t/006_encoding.pl +++ b/src/bin/pg_verifybackup/t/006_encoding.pl @@ -15,9 +15,11 @@ $primary->start; my $backup_path = $primary->backup_dir . '/test_encoding'; $primary->command_ok( [ - 'pg_basebackup', '-D', - $backup_path, '--no-sync', - '-cfast', '--manifest-force-encode' + 'pg_basebackup', + '--pgdata' => $backup_path, + '--no-sync', + '--checkpoint' => 'fast', + '--manifest-force-encode', ], "backup ok with forced hex encoding"); @@ -27,7 +29,7 @@ cmp_ok($count_of_encoded_path_in_manifest, '>', 100, "many paths are encoded in the manifest"); command_like( - [ 'pg_verifybackup', '-s', $backup_path ], + [ 'pg_verifybackup', '--skip-checksums', $backup_path ], qr/backup successfully verified/, 'backup with forced encoding verified'); diff --git a/src/bin/pg_verifybackup/t/007_wal.pl b/src/bin/pg_verifybackup/t/007_wal.pl index de6ef13d083..babc4f0a86b 100644 --- a/src/bin/pg_verifybackup/t/007_wal.pl +++ b/src/bin/pg_verifybackup/t/007_wal.pl @@ -15,7 +15,12 @@ $primary->init(allows_streaming => 1); $primary->start; my $backup_path = $primary->backup_dir . '/test_wal'; $primary->command_ok( - [ 'pg_basebackup', '-D', $backup_path, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $backup_path, + '--no-sync', + '--checkpoint' => 'fast' + ], "base backup ok"); # Rename pg_wal. @@ -30,13 +35,17 @@ command_fails_like( 'missing pg_wal causes failure'); # Should work if we skip WAL verification. 
-command_ok( - [ 'pg_verifybackup', '-n', $backup_path ], +command_ok([ 'pg_verifybackup', '--no-parse-wal', $backup_path ], 'missing pg_wal OK if not verifying WAL'); # Should also work if we specify the correct WAL location. -command_ok([ 'pg_verifybackup', '-w', $relocated_pg_wal, $backup_path ], - '-w can be used to specify WAL directory'); +command_ok( + [ + 'pg_verifybackup', + '--wal-directory' => $relocated_pg_wal, + $backup_path + ], + '--wal-directory can be used to specify WAL directory'); # Move directory back to original location. rename($relocated_pg_wal, $original_pg_wal) || die "rename pg_wal back: $!"; @@ -70,7 +79,12 @@ my $backup_path2 = $primary->backup_dir . '/test_tli'; # The base backup run below does a checkpoint, that removes the first segment # of the current timeline. $primary->command_ok( - [ 'pg_basebackup', '-D', $backup_path2, '--no-sync', '-cfast' ], + [ + 'pg_basebackup', + '--pgdata' => $backup_path2, + '--no-sync', + '--checkpoint' => 'fast' + ], "base backup 2 ok"); command_ok( [ 'pg_verifybackup', $backup_path2 ], diff --git a/src/bin/pg_verifybackup/t/010_client_untar.pl b/src/bin/pg_verifybackup/t/010_client_untar.pl index 723f5f16c10..4559c5c75e8 100644 --- a/src/bin/pg_verifybackup/t/010_client_untar.pl +++ b/src/bin/pg_verifybackup/t/010_client_untar.pl @@ -108,7 +108,11 @@ for my $tc (@test_configuration) "found expected backup files, compression $method"); # Verify tar backup. - $primary->command_ok([ 'pg_verifybackup', '-n', '-e', $backup_path ], + $primary->command_ok( + [ + 'pg_verifybackup', '--no-parse-wal', + '--exit-on-error', $backup_path, + ], "verify backup, compression $method"); # Cleanup. 
diff --git a/src/bin/pg_waldump/t/001_basic.pl b/src/bin/pg_waldump/t/001_basic.pl index 8d574a410cf..5c8fea275bb 100644 --- a/src/bin/pg_waldump/t/001_basic.pl +++ b/src/bin/pg_waldump/t/001_basic.pl @@ -21,31 +21,31 @@ command_fails_like( # invalid option arguments command_fails_like( - [ 'pg_waldump', '--block', 'bad' ], + [ 'pg_waldump', '--block' => 'bad' ], qr/error: invalid block number/, 'invalid block number'); command_fails_like( - [ 'pg_waldump', '--fork', 'bad' ], + [ 'pg_waldump', '--fork' => 'bad' ], qr/error: invalid fork name/, 'invalid fork name'); command_fails_like( - [ 'pg_waldump', '--limit', 'bad' ], + [ 'pg_waldump', '--limit' => 'bad' ], qr/error: invalid value/, 'invalid limit'); command_fails_like( - [ 'pg_waldump', '--relation', 'bad' ], + [ 'pg_waldump', '--relation' => 'bad' ], qr/error: invalid relation/, 'invalid relation specification'); command_fails_like( - [ 'pg_waldump', '--rmgr', 'bad' ], + [ 'pg_waldump', '--rmgr' => 'bad' ], qr/error: resource manager .* does not exist/, 'invalid rmgr name'); command_fails_like( - [ 'pg_waldump', '--start', 'bad' ], + [ 'pg_waldump', '--start' => 'bad' ], qr/error: invalid WAL location/, 'invalid start LSN'); command_fails_like( - [ 'pg_waldump', '--end', 'bad' ], + [ 'pg_waldump', '--end' => 'bad' ], qr/error: invalid WAL location/, 'invalid end LSN'); @@ -199,18 +199,24 @@ command_like( qr/./, 'runs with start and end segment specified'); command_fails_like( - [ 'pg_waldump', '-p', $node->data_dir ], + [ 'pg_waldump', '--path' => $node->data_dir ], qr/error: no start WAL location given/, 'path option requires start location'); command_like( [ - 'pg_waldump', '-p', $node->data_dir, '--start', - $start_lsn, '--end', $end_lsn + 'pg_waldump', + '--path' => $node->data_dir, + '--start' => $start_lsn, + '--end' => $end_lsn, ], qr/./, 'runs with path option and start and end locations'); command_fails_like( - [ 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn ], + [ + 'pg_waldump', + 
'--path' => $node->data_dir, + '--start' => $start_lsn, + ], qr/error: error in WAL record at/, 'falling off the end of the WAL results in an error'); @@ -222,7 +228,11 @@ command_like( qr/^$/, 'no output with --quiet option'); command_fails_like( - [ 'pg_waldump', '--quiet', '-p', $node->data_dir, '--start', $start_lsn ], + [ + 'pg_waldump', '--quiet', + '--path' => $node->data_dir, + '--start' => $start_lsn + ], qr/error: error in WAL record at/, 'errors are shown with --quiet'); @@ -240,7 +250,8 @@ command_fails_like( my (@cmd, $stdout, $stderr, $result); @cmd = ( - 'pg_waldump', '--start', $new_start, + 'pg_waldump', + '--start' => $new_start, $node->data_dir . '/pg_wal/' . $start_walfile); $result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr; ok($result, "runs with start segment and start LSN specified"); @@ -258,8 +269,10 @@ sub test_pg_waldump my (@cmd, $stdout, $stderr, $result, @lines); @cmd = ( - 'pg_waldump', '-p', $node->data_dir, '--start', $start_lsn, '--end', - $end_lsn); + 'pg_waldump', + '--path' => $node->data_dir, + '--start' => $start_lsn, + '--end' => $end_lsn); push @cmd, @opts; $result = IPC::Run::run \@cmd, '>', \$stdout, '2>', \$stderr; ok($result, "pg_waldump @opts: runs ok"); @@ -274,7 +287,7 @@ my @lines; @lines = test_pg_waldump; is(grep(!/^rmgr: \w/, @lines), 0, 'all output lines are rmgr lines'); -@lines = test_pg_waldump('--limit', 6); +@lines = test_pg_waldump('--limit' => 6); is(@lines, 6, 'limit option observed'); @lines = test_pg_waldump('--fullpage'); @@ -288,21 +301,20 @@ is(grep(/^rmgr:/, @lines), 0, 'no rmgr lines output'); like($lines[0], qr/WAL statistics/, "statistics on stdout"); is(grep(/^rmgr:/, @lines), 0, 'no rmgr lines output'); -@lines = test_pg_waldump('--rmgr', 'Btree'); +@lines = test_pg_waldump('--rmgr' => 'Btree'); is(grep(!/^rmgr: Btree/, @lines), 0, 'only Btree lines'); -@lines = test_pg_waldump('--fork', 'init'); +@lines = test_pg_waldump('--fork' => 'init'); is(grep(!/fork init/, @lines), 0, 'only 
init fork lines'); -@lines = test_pg_waldump('--relation', - "$default_ts_oid/$postgres_db_oid/$rel_t1_oid"); +@lines = test_pg_waldump( + '--relation' => "$default_ts_oid/$postgres_db_oid/$rel_t1_oid"); is(grep(!/rel $default_ts_oid\/$postgres_db_oid\/$rel_t1_oid/, @lines), 0, 'only lines for selected relation'); -@lines = - test_pg_waldump('--relation', - "$default_ts_oid/$postgres_db_oid/$rel_i1a_oid", - '--block', 1); +@lines = test_pg_waldump( + '--relation' => "$default_ts_oid/$postgres_db_oid/$rel_i1a_oid", + '--block' => 1); is(grep(!/\bblk 1\b/, @lines), 0, 'only lines for selected block'); diff --git a/src/bin/pg_waldump/t/002_save_fullpage.pl b/src/bin/pg_waldump/t/002_save_fullpage.pl index 48a308314c6..17b15e3a649 100644 --- a/src/bin/pg_waldump/t/002_save_fullpage.pl +++ b/src/bin/pg_waldump/t/002_save_fullpage.pl @@ -71,9 +71,10 @@ ok(-f $walfile, "Got a WAL file"); $node->command_ok( [ - 'pg_waldump', '--quiet', - '--save-fullpage', "$tmp_folder/raw", - '--relation', $relation, + 'pg_waldump', + '--quiet', + '--save-fullpage' => "$tmp_folder/raw", + '--relation' => $relation, $walfile ], 'pg_waldump with --save-fullpage runs'); diff --git a/src/bin/pgbench/t/001_pgbench_with_server.pl b/src/bin/pgbench/t/001_pgbench_with_server.pl index 6fedf7038ef..8816af17ac1 100644 --- a/src/bin/pgbench/t/001_pgbench_with_server.pl +++ b/src/bin/pgbench/t/001_pgbench_with_server.pl @@ -213,7 +213,7 @@ my $nthreads = 2; { my ($stderr); - run_log([ 'pgbench', '-j', '2', '--bad-option' ], '2>', \$stderr); + run_log([ 'pgbench', '--jobs' => '2', '--bad-option' ], '2>', \$stderr); $nthreads = 1 if $stderr =~ m/threads are not supported on this platform/; } diff --git a/src/bin/psql/t/001_basic.pl b/src/bin/psql/t/001_basic.pl index 3c381a35060..3170bc86856 100644 --- a/src/bin/psql/t/001_basic.pl +++ b/src/bin/psql/t/001_basic.pl @@ -216,11 +216,12 @@ $node->safe_psql('postgres', "CREATE TABLE tab_psql_single (a int);"); # Tests with ON_ERROR_STOP. 
$node->command_ok( [ - 'psql', '-X', - '--single-transaction', '-v', - 'ON_ERROR_STOP=1', '-c', - 'INSERT INTO tab_psql_single VALUES (1)', '-c', - 'INSERT INTO tab_psql_single VALUES (2)' + 'psql', + '--no-psqlrc', + '--single-transaction', + '--set' => 'ON_ERROR_STOP=1', + '--command' => 'INSERT INTO tab_psql_single VALUES (1)', + '--command' => 'INSERT INTO tab_psql_single VALUES (2)', ], 'ON_ERROR_STOP, --single-transaction and multiple -c switches'); my $row_count = @@ -231,11 +232,12 @@ is($row_count, '2', $node->command_fails( [ - 'psql', '-X', - '--single-transaction', '-v', - 'ON_ERROR_STOP=1', '-c', - 'INSERT INTO tab_psql_single VALUES (3)', '-c', - "\\copy tab_psql_single FROM '$tempdir/nonexistent'" + 'psql', + '--no-psqlrc', + '--single-transaction', + '--set' => 'ON_ERROR_STOP=1', + '--command' => 'INSERT INTO tab_psql_single VALUES (3)', + '--command' => "\\copy tab_psql_single FROM '$tempdir/nonexistent'" ], 'ON_ERROR_STOP, --single-transaction and multiple -c switches, error'); $row_count = @@ -252,9 +254,12 @@ append_to_file($copy_sql_file, append_to_file($insert_sql_file, 'INSERT INTO tab_psql_single VALUES (4);'); $node->command_ok( [ - 'psql', '-X', '--single-transaction', '-v', - 'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f', - $insert_sql_file + 'psql', + '--no-psqlrc', + '--single-transaction', + '--set' => 'ON_ERROR_STOP=1', + '--file' => $insert_sql_file, + '--file' => $insert_sql_file ], 'ON_ERROR_STOP, --single-transaction and multiple -f switches'); $row_count = @@ -265,9 +270,12 @@ is($row_count, '4', $node->command_fails( [ - 'psql', '-X', '--single-transaction', '-v', - 'ON_ERROR_STOP=1', '-f', $insert_sql_file, '-f', - $copy_sql_file + 'psql', + '--no-psqlrc', + '--single-transaction', + '--set' => 'ON_ERROR_STOP=1', + '--file' => $insert_sql_file, + '--file' => $copy_sql_file ], 'ON_ERROR_STOP, --single-transaction and multiple -f switches, error'); $row_count = @@ -281,11 +289,12 @@ is($row_count, '4', # transaction commits. 
$node->command_fails( [ - 'psql', '-X', - '--single-transaction', '-f', - $insert_sql_file, '-f', - $insert_sql_file, '-c', - "\\copy tab_psql_single FROM '$tempdir/nonexistent'" + 'psql', + '--no-psqlrc', + '--single-transaction', + '--file' => $insert_sql_file, + '--file' => $insert_sql_file, + '--command' => "\\copy tab_psql_single FROM '$tempdir/nonexistent'" ], 'no ON_ERROR_STOP, --single-transaction and multiple -f/-c switches'); $row_count = @@ -298,9 +307,12 @@ is($row_count, '6', # returns a success and the transaction commits. $node->command_ok( [ - 'psql', '-X', '--single-transaction', '-f', - $insert_sql_file, '-f', $insert_sql_file, '-f', - $copy_sql_file + 'psql', + '--no-psqlrc', + '--single-transaction', + '--file' => $insert_sql_file, + '--file' => $insert_sql_file, + '--file' => $copy_sql_file ], 'no ON_ERROR_STOP, --single-transaction and multiple -f switches'); $row_count = @@ -313,11 +325,12 @@ is($row_count, '8', # the transaction commit even if there is a failure in-between. 
$node->command_ok( [ - 'psql', '-X', - '--single-transaction', '-c', - 'INSERT INTO tab_psql_single VALUES (5)', '-f', - $copy_sql_file, '-c', - 'INSERT INTO tab_psql_single VALUES (6)' + 'psql', + '--no-psqlrc', + '--single-transaction', + '--command' => 'INSERT INTO tab_psql_single VALUES (5)', + '--file' => $copy_sql_file, + '--command' => 'INSERT INTO tab_psql_single VALUES (6)' ], 'no ON_ERROR_STOP, --single-transaction and multiple -c switches'); $row_count = diff --git a/src/bin/scripts/t/010_clusterdb.pl b/src/bin/scripts/t/010_clusterdb.pl index f42a26b22de..a4e4d468578 100644 --- a/src/bin/scripts/t/010_clusterdb.pl +++ b/src/bin/scripts/t/010_clusterdb.pl @@ -21,14 +21,14 @@ $node->issues_sql_like( qr/statement: CLUSTER;/, 'SQL CLUSTER run'); -$node->command_fails([ 'clusterdb', '-t', 'nonexistent' ], +$node->command_fails([ 'clusterdb', '--table' => 'nonexistent' ], 'fails with nonexistent table'); $node->safe_psql('postgres', 'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x' ); $node->issues_sql_like( - [ 'clusterdb', '-t', 'test1' ], + [ 'clusterdb', '--table' => 'test1' ], qr/statement: CLUSTER public\.test1;/, 'cluster specific table'); diff --git a/src/bin/scripts/t/011_clusterdb_all.pl b/src/bin/scripts/t/011_clusterdb_all.pl index beaf2930f0e..cf06c8c1f8e 100644 --- a/src/bin/scripts/t/011_clusterdb_all.pl +++ b/src/bin/scripts/t/011_clusterdb_all.pl @@ -15,7 +15,7 @@ $node->start; # clusterdb -a is not compatible with -d. This relies on PGDATABASE to be # set, something PostgreSQL::Test::Cluster does. 
$node->issues_sql_like( - [ 'clusterdb', '-a' ], + [ 'clusterdb', '--all' ], qr/statement: CLUSTER.*statement: CLUSTER/s, 'cluster all databases'); @@ -24,13 +24,13 @@ $node->safe_psql( CREATE DATABASE regression_invalid; UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid'; )); -$node->command_ok([ 'clusterdb', '-a' ], +$node->command_ok([ 'clusterdb', '--all' ], 'invalid database not targeted by clusterdb -a'); # Doesn't quite belong here, but don't want to waste time by creating an # invalid database in 010_clusterdb.pl as well. $node->command_fails_like( - [ 'clusterdb', '-d', 'regression_invalid' ], + [ 'clusterdb', '--dbname' => 'regression_invalid' ], qr/FATAL: cannot connect to invalid database "regression_invalid"/, 'clusterdb cannot target invalid database'); @@ -41,7 +41,7 @@ $node->safe_psql('template1', 'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a); CLUSTER test1 USING test1x' ); $node->issues_sql_like( - [ 'clusterdb', '-a', '-t', 'test1' ], + [ 'clusterdb', '--all', '--table' => 'test1' ], qr/statement: CLUSTER public\.test1/s, 'cluster specific table in all databases'); diff --git a/src/bin/scripts/t/020_createdb.pl b/src/bin/scripts/t/020_createdb.pl index 191c7885a8d..a8293390ede 100644 --- a/src/bin/scripts/t/020_createdb.pl +++ b/src/bin/scripts/t/020_createdb.pl @@ -21,7 +21,13 @@ $node->issues_sql_like( qr/statement: CREATE DATABASE foobar1/, 'SQL CREATE DATABASE run'); $node->issues_sql_like( - [ 'createdb', '-l', 'C', '-E', 'LATIN1', '-T', 'template0', 'foobar2' ], + [ + 'createdb', + '--locale' => 'C', + '--encoding' => 'LATIN1', + '--template' => 'template0', + 'foobar2', + ], qr/statement: CREATE DATABASE foobar2 ENCODING 'LATIN1'/, 'create database with encoding'); @@ -32,35 +38,45 @@ if ($ENV{with_icu} eq 'yes') # provider. XXX Maybe split into multiple tests? 
$node->command_fails( [ - 'createdb', '-T', 'template0', '-E', 'UTF8', - '--locale-provider=icu', 'foobar4' + 'createdb', + '--template' => 'template0', + '--encoding' => 'UTF8', + '--locale-provider' => 'icu', + 'foobar4', ], 'create database with ICU fails without ICU locale specified'); $node->issues_sql_like( [ - 'createdb', '-T', - 'template0', '-E', - 'UTF8', '--locale-provider=icu', - '--locale=C', '--icu-locale=en', - 'foobar5' + 'createdb', + '--template' => 'template0', + '--encoding' => 'UTF8', + '--locale-provider' => 'icu', + '--locale' => 'C', + '--icu-locale' => 'en', + 'foobar5', ], qr/statement: CREATE DATABASE foobar5 .* LOCALE_PROVIDER icu ICU_LOCALE 'en'/, 'create database with ICU locale specified'); $node->command_fails( [ - 'createdb', '-T', 'template0', '-E', 'UTF8', - '--locale-provider=icu', - '--icu-locale=@colNumeric=lower', 'foobarX' + 'createdb', + '--template' => 'template0', + '--encoding' => 'UTF8', + '--locale-provider' => 'icu', + '--icu-locale' => '@colNumeric=lower', + 'foobarX', ], 'fails for invalid ICU locale'); $node->command_fails_like( [ - 'createdb', '-T', - 'template0', '--locale-provider=icu', - '--encoding=SQL_ASCII', 'foobarX' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'icu', + '--encoding' => 'SQL_ASCII', + 'foobarX', ], qr/ERROR: encoding "SQL_ASCII" is not supported with ICU provider/, 'fails for encoding not supported by ICU'); @@ -72,116 +88,144 @@ if ($ENV{with_icu} eq 'yes') $node2->command_ok( [ - 'createdb', '-T', - 'template0', '--locale-provider=libc', - 'foobar55' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'libc', + 'foobar55', ], 'create database with libc provider from template database with icu provider' ); $node2->command_ok( [ - 'createdb', '-T', 'template0', '--icu-locale', 'en-US', - 'foobar56' + 'createdb', + '--template' => 'template0', + '--icu-locale' => 'en-US', + 'foobar56', ], 'create database with icu locale from template database with 
icu provider' ); $node2->command_ok( [ - 'createdb', '-T', - 'template0', '--locale-provider', - 'icu', '--locale', - 'en', '--lc-collate', - 'C', '--lc-ctype', - 'C', 'foobar57' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'icu', + '--locale' => 'en', + '--lc-collate' => 'C', + '--lc-ctype' => 'C', + 'foobar57', ], 'create database with locale as ICU locale'); } else { $node->command_fails( - [ 'createdb', '-T', 'template0', '--locale-provider=icu', 'foobar4' ], + [ + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'icu', + 'foobar4', + ], 'create database with ICU fails since no ICU support'); } $node->command_fails( [ - 'createdb', '-T', - 'template0', '--locale-provider=builtin', - 'tbuiltin1' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'builtin', + 'tbuiltin1', ], 'create database with provider "builtin" fails without --locale'); $node->command_ok( [ - 'createdb', '-T', - 'template0', '--locale-provider=builtin', - '--locale=C', 'tbuiltin2' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'builtin', + '--locale' => 'C', + 'tbuiltin2', ], 'create database with provider "builtin" and locale "C"'); $node->command_ok( [ - 'createdb', '-T', - 'template0', '--locale-provider=builtin', - '--locale=C', '--lc-collate=C', - 'tbuiltin3' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'builtin', + '--locale' => 'C', + '--lc-collate' => 'C', + 'tbuiltin3', ], 'create database with provider "builtin" and LC_COLLATE=C'); $node->command_ok( [ - 'createdb', '-T', - 'template0', '--locale-provider=builtin', - '--locale=C', '--lc-ctype=C', - 'tbuiltin4' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'builtin', + '--locale' => 'C', + '--lc-ctype' => 'C', + 'tbuiltin4', ], 'create database with provider "builtin" and LC_CTYPE=C'); $node->command_ok( [ - 'createdb', '-T', - 'template0', '--locale-provider=builtin', - '--lc-collate=C', 
'--lc-ctype=C', - '-E UTF-8', '--builtin-locale=C.UTF8', - 'tbuiltin5' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'builtin', + '--lc-collate' => 'C', + '--lc-ctype' => 'C', + '--encoding' => 'UTF-8', + '--builtin-locale' => 'C.UTF8', + 'tbuiltin5', ], 'create database with --builtin-locale C.UTF-8 and -E UTF-8'); $node->command_fails( [ - 'createdb', '-T', - 'template0', '--locale-provider=builtin', - '--lc-collate=C', '--lc-ctype=C', - '-E LATIN1', '--builtin-locale=C.UTF-8', - 'tbuiltin6' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'builtin', + '--lc-collate' => 'C', + '--lc-ctype' => 'C', + '--encoding' => 'LATIN1', + '--builtin-locale' => 'C.UTF-8', + 'tbuiltin6', ], 'create database with --builtin-locale C.UTF-8 and -E LATIN1'); $node->command_fails( [ - 'createdb', '-T', - 'template0', '--locale-provider=builtin', - '--locale=C', '--icu-locale=en', - 'tbuiltin7' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'builtin', + '--locale' => 'C', + '--icu-locale' => 'en', + 'tbuiltin7', ], 'create database with provider "builtin" and ICU_LOCALE="en"'); $node->command_fails( [ - 'createdb', '-T', - 'template0', '--locale-provider=builtin', - '--locale=C', '--icu-rules=""', - 'tbuiltin8' + 'createdb', + '--template' => 'template0', + '--locale-provider' => 'builtin', + '--locale' => 'C', + '--icu-rules' => '""', + 'tbuiltin8', ], 'create database with provider "builtin" and ICU_RULES=""'); $node->command_fails( [ - 'createdb', '-T', - 'template1', '--locale-provider=builtin', - '--locale=C', 'tbuiltin9' + 'createdb', + '--template' => 'template1', + '--locale-provider' => 'builtin', + '--locale' => 'C', + 'tbuiltin9', ], 'create database with provider "builtin" not matching template'); @@ -189,7 +233,12 @@ $node->command_fails([ 'createdb', 'foobar1' ], 'fails if database already exists'); $node->command_fails( - [ 'createdb', '-T', 'template0', '--locale-provider=xyz', 'foobarX' ], + [ + 
'createdb', + '--template' => 'template0', + '--locale-provider' => 'xyz', + 'foobarX', + ], 'fails for invalid locale provider'); # Check use of templates with shared dependencies copied from the template. @@ -200,7 +249,7 @@ CREATE TABLE tab_foobar (id int); ALTER TABLE tab_foobar owner to role_foobar; CREATE POLICY pol_foobar ON tab_foobar FOR ALL TO role_foobar;'); $node->issues_sql_like( - [ 'createdb', '-l', 'C', '-T', 'foobar2', 'foobar3' ], + [ 'createdb', '--locale' => 'C', '--template' => 'foobar2', 'foobar3' ], qr/statement: CREATE DATABASE foobar3 TEMPLATE foobar2 LOCALE 'C'/, 'create database with template'); ($ret, $stdout, $stderr) = $node->psql( @@ -228,7 +277,7 @@ $node->command_checks_all( 1, [qr/^$/], [ - qr/^createdb: error: database creation failed: ERROR: invalid LC_COLLATE locale name|^createdb: error: database creation failed: ERROR: new collation \(foo'; SELECT '1\) is incompatible with the collation of the template database/s + qr/^createdb: error: database creation failed: ERROR: invalid LC_COLLATE locale name|^createdb: error: database creation failed: ERROR: new collation \(foo'; SELECT '1\) is incompatible with the collation of the template database/s, ], 'createdb with incorrect --lc-collate'); $node->command_checks_all( @@ -236,7 +285,7 @@ $node->command_checks_all( 1, [qr/^$/], [ - qr/^createdb: error: database creation failed: ERROR: invalid LC_CTYPE locale name|^createdb: error: database creation failed: ERROR: new LC_CTYPE \(foo'; SELECT '1\) is incompatible with the LC_CTYPE of the template database/s + qr/^createdb: error: database creation failed: ERROR: invalid LC_CTYPE locale name|^createdb: error: database creation failed: ERROR: new LC_CTYPE \(foo'; SELECT '1\) is incompatible with the LC_CTYPE of the template database/s, ], 'createdb with incorrect --lc-ctype'); @@ -245,34 +294,59 @@ $node->command_checks_all( 1, [qr/^$/], [ - qr/^createdb: error: database creation failed: ERROR: invalid create database strategy "foo"/s + 
qr/^createdb: error: database creation failed: ERROR: invalid create database strategy "foo"/s, ], 'createdb with incorrect --strategy'); # Check database creation strategy $node->issues_sql_like( - [ 'createdb', '-T', 'foobar2', '-S', 'wal_log', 'foobar6' ], + [ + 'createdb', + '--template' => 'foobar2', + '--strategy' => 'wal_log', + 'foobar6', + ], qr/statement: CREATE DATABASE foobar6 STRATEGY wal_log TEMPLATE foobar2/, 'create database with WAL_LOG strategy'); $node->issues_sql_like( - [ 'createdb', '-T', 'foobar2', '-S', 'WAL_LOG', 'foobar6s' ], + [ + 'createdb', + '--template' => 'foobar2', + '--strategy' => 'WAL_LOG', + 'foobar6s', + ], qr/statement: CREATE DATABASE foobar6s STRATEGY "WAL_LOG" TEMPLATE foobar2/, 'create database with WAL_LOG strategy'); $node->issues_sql_like( - [ 'createdb', '-T', 'foobar2', '-S', 'file_copy', 'foobar7' ], + [ + 'createdb', + '--template' => 'foobar2', + '--strategy' => 'file_copy', + 'foobar7', + ], qr/statement: CREATE DATABASE foobar7 STRATEGY file_copy TEMPLATE foobar2/, 'create database with FILE_COPY strategy'); $node->issues_sql_like( - [ 'createdb', '-T', 'foobar2', '-S', 'FILE_COPY', 'foobar7s' ], + [ + 'createdb', + '--template' => 'foobar2', + '--strategy' => 'FILE_COPY', + 'foobar7s', + ], qr/statement: CREATE DATABASE foobar7s STRATEGY "FILE_COPY" TEMPLATE foobar2/, 'create database with FILE_COPY strategy'); # Create database owned by role_foobar. 
$node->issues_sql_like( - [ 'createdb', '-T', 'foobar2', '-O', 'role_foobar', 'foobar8' ], + [ + 'createdb', + '--template' => 'foobar2', + '--owner' => 'role_foobar', + 'foobar8', + ], qr/statement: CREATE DATABASE foobar8 OWNER role_foobar TEMPLATE foobar2/, 'create database with owner role_foobar'); ($ret, $stdout, $stderr) = diff --git a/src/bin/scripts/t/040_createuser.pl b/src/bin/scripts/t/040_createuser.pl index 2783ef8b0fc..54af43401bb 100644 --- a/src/bin/scripts/t/040_createuser.pl +++ b/src/bin/scripts/t/040_createuser.pl @@ -21,34 +21,37 @@ $node->issues_sql_like( qr/statement: CREATE ROLE regress_user1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/, 'SQL CREATE USER run'); $node->issues_sql_like( - [ 'createuser', '-L', 'regress_role1' ], + [ 'createuser', '--no-login', 'regress_role1' ], qr/statement: CREATE ROLE regress_role1 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT NOLOGIN NOREPLICATION NOBYPASSRLS;/, 'create a non-login role'); $node->issues_sql_like( - [ 'createuser', '-r', 'regress user2' ], + [ 'createuser', '--createrole', 'regress user2' ], qr/statement: CREATE ROLE "regress user2" NOSUPERUSER NOCREATEDB CREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/, 'create a CREATEROLE user'); $node->issues_sql_like( - [ 'createuser', '-s', 'regress_user3' ], + [ 'createuser', '--superuser', 'regress_user3' ], qr/statement: CREATE ROLE regress_user3 SUPERUSER CREATEDB CREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/, 'create a superuser'); $node->issues_sql_like( [ - 'createuser', '-a', - 'regress_user1', '-a', - 'regress user2', 'regress user #4' + 'createuser', + '--with-admin' => 'regress_user1', + '--with-admin' => 'regress user2', + 'regress user #4' ], qr/statement: CREATE ROLE "regress user #4" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ADMIN regress_user1,"regress user2";/, 'add a role as a member with admin option of the newly created role'); $node->issues_sql_like( [ - 
'createuser', 'REGRESS_USER5', '-m', 'regress_user3', - '-m', 'regress user #4' + 'createuser', + 'REGRESS_USER5', + '--with-member' => 'regress_user3', + '--with-member' => 'regress user #4' ], qr/statement: CREATE ROLE "REGRESS_USER5" NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ROLE regress_user3,"regress user #4";/, 'add a role as a member of the newly created role'); $node->issues_sql_like( - [ 'createuser', '-v', '2029 12 31', 'regress_user6' ], + [ 'createuser', '--valid-until' => '2029 12 31', 'regress_user6' ], qr/statement: CREATE ROLE regress_user6 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS VALID UNTIL \'2029 12 31\';/, 'create a role with a password expiration date'); $node->issues_sql_like( @@ -60,26 +63,31 @@ $node->issues_sql_like( qr/statement: CREATE ROLE regress_user8 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS;/, 'create a role without BYPASSRLS'); $node->issues_sql_like( - [ 'createuser', '--with-admin', 'regress_user1', 'regress_user9' ], + [ 'createuser', '--with-admin' => 'regress_user1', 'regress_user9' ], qr/statement: CREATE ROLE regress_user9 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ADMIN regress_user1;/, '--with-admin'); $node->issues_sql_like( - [ 'createuser', '--with-member', 'regress_user1', 'regress_user10' ], + [ 'createuser', '--with-member' => 'regress_user1', 'regress_user10' ], qr/statement: CREATE ROLE regress_user10 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS ROLE regress_user1;/, '--with-member'); $node->issues_sql_like( - [ 'createuser', '--role', 'regress_user1', 'regress_user11' ], + [ 'createuser', '--role' => 'regress_user1', 'regress_user11' ], qr/statement: CREATE ROLE regress_user11 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS IN ROLE regress_user1;/, '--role'); $node->issues_sql_like( - [ 'createuser', 'regress_user12', 
'--member-of', 'regress_user1' ], + [ 'createuser', 'regress_user12', '--member-of' => 'regress_user1' ], qr/statement: CREATE ROLE regress_user12 NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN NOREPLICATION NOBYPASSRLS IN ROLE regress_user1;/, '--member-of'); $node->command_fails([ 'createuser', 'regress_user1' ], 'fails if role already exists'); $node->command_fails( - [ 'createuser', 'regress_user1', '-m', 'regress_user2', 'regress_user3' ], + [ + 'createuser', + 'regress_user1', + '--with-member' => 'regress_user2', + 'regress_user3' + ], 'fails for too many non-options'); done_testing(); diff --git a/src/bin/scripts/t/080_pg_isready.pl b/src/bin/scripts/t/080_pg_isready.pl index 8bb9f38f501..f184bd77388 100644 --- a/src/bin/scripts/t/080_pg_isready.pl +++ b/src/bin/scripts/t/080_pg_isready.pl @@ -20,7 +20,10 @@ $node->command_fails(['pg_isready'], 'fails with no server running'); $node->start; $node->command_ok( - [ 'pg_isready', "--timeout=$PostgreSQL::Test::Utils::timeout_default" ], + [ + 'pg_isready', + '--timeout' => $PostgreSQL::Test::Utils::timeout_default, + ], 'succeeds with server running'); done_testing(); diff --git a/src/bin/scripts/t/090_reindexdb.pl b/src/bin/scripts/t/090_reindexdb.pl index 9110974e8a7..378f8ad7a58 100644 --- a/src/bin/scripts/t/090_reindexdb.pl +++ b/src/bin/scripts/t/090_reindexdb.pl @@ -96,7 +96,7 @@ test1|test1x|OID is unchanged|relfilenode has changed), $node->safe_psql('postgres', "TRUNCATE index_relfilenodes; $save_relfilenodes"); $node->issues_sql_like( - [ 'reindexdb', '-s', 'postgres' ], + [ 'reindexdb', '--system', 'postgres' ], qr/statement: REINDEX SYSTEM postgres;/, 'reindex system tables'); $relnode_info = $node->safe_psql('postgres', $compare_relfilenodes); @@ -108,29 +108,37 @@ test1|test1x|OID is unchanged|relfilenode is unchanged), 'relfilenode change after REINDEX SYSTEM'); $node->issues_sql_like( - [ 'reindexdb', '-t', 'test1', 'postgres' ], + [ 'reindexdb', '--table' => 'test1', 'postgres' ], 
qr/statement: REINDEX TABLE public\.test1;/, 'reindex specific table'); $node->issues_sql_like( - [ 'reindexdb', '-t', 'test1', '--tablespace', $tbspace_name, 'postgres' ], + [ + 'reindexdb', + '--table' => 'test1', + '--tablespace' => $tbspace_name, + 'postgres', + ], qr/statement: REINDEX \(TABLESPACE $tbspace_name\) TABLE public\.test1;/, 'reindex specific table on tablespace'); $node->issues_sql_like( - [ 'reindexdb', '-i', 'test1x', 'postgres' ], + [ 'reindexdb', '--index' => 'test1x', 'postgres' ], qr/statement: REINDEX INDEX public\.test1x;/, 'reindex specific index'); $node->issues_sql_like( - [ 'reindexdb', '-S', 'pg_catalog', 'postgres' ], + [ 'reindexdb', '--schema' => 'pg_catalog', 'postgres' ], qr/statement: REINDEX SCHEMA pg_catalog;/, 'reindex specific schema'); $node->issues_sql_like( - [ 'reindexdb', '-v', '-t', 'test1', 'postgres' ], + [ 'reindexdb', '--verbose', '--table' => 'test1', 'postgres' ], qr/statement: REINDEX \(VERBOSE\) TABLE public\.test1;/, 'reindex with verbose output'); $node->issues_sql_like( [ - 'reindexdb', '-v', '-t', 'test1', - '--tablespace', $tbspace_name, 'postgres' + 'reindexdb', + '--verbose', + '--table' => 'test1', + '--tablespace' => $tbspace_name, + 'postgres', ], qr/statement: REINDEX \(VERBOSE, TABLESPACE $tbspace_name\) TABLE public\.test1;/, 'reindex with verbose output and tablespace'); @@ -153,27 +161,36 @@ test1|test1x|OID has changed|relfilenode has changed), 'OID change after REINDEX DATABASE CONCURRENTLY'); $node->issues_sql_like( - [ 'reindexdb', '--concurrently', '-t', 'test1', 'postgres' ], + [ 'reindexdb', '--concurrently', '--table' => 'test1', 'postgres' ], qr/statement: REINDEX TABLE CONCURRENTLY public\.test1;/, 'reindex specific table concurrently'); $node->issues_sql_like( - [ 'reindexdb', '--concurrently', '-i', 'test1x', 'postgres' ], + [ 'reindexdb', '--concurrently', '--index' => 'test1x', 'postgres' ], qr/statement: REINDEX INDEX CONCURRENTLY public\.test1x;/, 'reindex specific index 
concurrently'); $node->issues_sql_like( - [ 'reindexdb', '--concurrently', '-S', 'public', 'postgres' ], + [ 'reindexdb', '--concurrently', '--schema' => 'public', 'postgres' ], qr/statement: REINDEX SCHEMA CONCURRENTLY public;/, 'reindex specific schema concurrently'); -$node->command_fails([ 'reindexdb', '--concurrently', '-s', 'postgres' ], +$node->command_fails( + [ 'reindexdb', '--concurrently', '--system', 'postgres' ], 'reindex system tables concurrently'); $node->issues_sql_like( - [ 'reindexdb', '--concurrently', '-v', '-t', 'test1', 'postgres' ], + [ + 'reindexdb', '--concurrently', '--verbose', + '--table' => 'test1', + 'postgres', + ], qr/statement: REINDEX \(VERBOSE\) TABLE CONCURRENTLY public\.test1;/, 'reindex with verbose output concurrently'); $node->issues_sql_like( [ - 'reindexdb', '--concurrently', '-v', '-t', - 'test1', '--tablespace', $tbspace_name, 'postgres' + 'reindexdb', + '--concurrently', + '--verbose', + '--table' => 'test1', + '--tablespace' => $tbspace_name, + 'postgres', ], qr/statement: REINDEX \(VERBOSE, TABLESPACE $tbspace_name\) TABLE CONCURRENTLY public\.test1;/, 'reindex concurrently with verbose output and tablespace'); @@ -185,8 +202,10 @@ $node->issues_sql_like( # messages. 
$node->command_checks_all( [ - 'reindexdb', '-t', $toast_table, '--tablespace', - $tbspace_name, 'postgres' + 'reindexdb', + '--table' => $toast_table, + '--tablespace' => $tbspace_name, + 'postgres', ], 1, [], @@ -194,8 +213,11 @@ $node->command_checks_all( 'reindex toast table with tablespace'); $node->command_checks_all( [ - 'reindexdb', '--concurrently', '-t', $toast_table, - '--tablespace', $tbspace_name, 'postgres' + 'reindexdb', + '--concurrently', + '--table' => $toast_table, + '--tablespace' => $tbspace_name, + 'postgres', ], 1, [], @@ -203,8 +225,10 @@ $node->command_checks_all( 'reindex toast table concurrently with tablespace'); $node->command_checks_all( [ - 'reindexdb', '-i', $toast_index, '--tablespace', - $tbspace_name, 'postgres' + 'reindexdb', + '--index' => $toast_index, + '--tablespace' => $tbspace_name, + 'postgres', ], 1, [], @@ -212,8 +236,11 @@ $node->command_checks_all( 'reindex toast index with tablespace'); $node->command_checks_all( [ - 'reindexdb', '--concurrently', '-i', $toast_index, - '--tablespace', $tbspace_name, 'postgres' + 'reindexdb', + '--concurrently', + '--index' => $toast_index, + '--tablespace' => $tbspace_name, + 'postgres', ], 1, [], @@ -246,35 +273,51 @@ $node->safe_psql( |); $node->command_fails( - [ 'reindexdb', '-j', '2', '-s', 'postgres' ], + [ 'reindexdb', '--jobs' => '2', '--system', 'postgres' ], 'parallel reindexdb cannot process system catalogs'); $node->command_ok( - [ 'reindexdb', '-j', '2', '-i', 's1.i1', '-i', 's2.i2', 'postgres' ], + [ + 'reindexdb', + '--jobs' => '2', + '--index' => 's1.i1', + '--index' => 's2.i2', + 'postgres', + ], 'parallel reindexdb for indices'); # Note that the ordering of the commands is not stable, so the second # command for s2.t2 is not checked after. 
$node->issues_sql_like( - [ 'reindexdb', '-j', '2', '-S', 's1', '-S', 's2', 'postgres' ], + [ + 'reindexdb', + '--jobs' => '2', + '--schema' => 's1', + '--schema' => 's2', + 'postgres', + ], qr/statement:\ REINDEX TABLE s1.t1;/, 'parallel reindexdb for schemas does a per-table REINDEX'); -$node->command_ok( - [ 'reindexdb', '-j', '2', '-S', 's3' ], +$node->command_ok([ 'reindexdb', '--jobs' => '2', '--schema' => 's3' ], 'parallel reindexdb with empty schema'); $node->command_ok( - [ 'reindexdb', '-j', '2', '--concurrently', '-d', 'postgres' ], + [ + 'reindexdb', + '--jobs' => '2', + '--concurrently', + '--dbname' => 'postgres', + ], 'parallel reindexdb on database, concurrently'); # combinations of objects $node->issues_sql_like( - [ 'reindexdb', '-s', '-t', 'test1', 'postgres' ], + [ 'reindexdb', '--system', '--table' => 'test1', 'postgres' ], qr/statement:\ REINDEX SYSTEM postgres;/, 'specify both --system and --table'); $node->issues_sql_like( - [ 'reindexdb', '-s', '-i', 'test1x', 'postgres' ], + [ 'reindexdb', '--system', '--index' => 'test1x', 'postgres' ], qr/statement:\ REINDEX INDEX public.test1x;/, 'specify both --system and --index'); $node->issues_sql_like( - [ 'reindexdb', '-s', '-S', 'pg_catalog', 'postgres' ], + [ 'reindexdb', '--system', '--schema' => 'pg_catalog', 'postgres' ], qr/statement:\ REINDEX SCHEMA pg_catalog;/, 'specify both --system and --schema'); diff --git a/src/bin/scripts/t/091_reindexdb_all.pl b/src/bin/scripts/t/091_reindexdb_all.pl index 3da5f3a9ef8..6a75946b2b9 100644 --- a/src/bin/scripts/t/091_reindexdb_all.pl +++ b/src/bin/scripts/t/091_reindexdb_all.pl @@ -18,23 +18,23 @@ $node->safe_psql('postgres', $node->safe_psql('template1', 'CREATE TABLE test1 (a int); CREATE INDEX test1x ON test1 (a);'); $node->issues_sql_like( - [ 'reindexdb', '-a' ], + [ 'reindexdb', '--all' ], qr/statement: REINDEX.*statement: REINDEX/s, 'reindex all databases'); $node->issues_sql_like( - [ 'reindexdb', '-a', '-s' ], + [ 'reindexdb', '--all', 
'--system' ], qr/statement: REINDEX SYSTEM postgres/s, 'reindex system catalogs in all databases'); $node->issues_sql_like( - [ 'reindexdb', '-a', '-S', 'public' ], + [ 'reindexdb', '--all', '--schema' => 'public' ], qr/statement: REINDEX SCHEMA public/s, 'reindex schema in all databases'); $node->issues_sql_like( - [ 'reindexdb', '-a', '-i', 'test1x' ], + [ 'reindexdb', '--all', '--index' => 'test1x' ], qr/statement: REINDEX INDEX public\.test1x/s, 'reindex index in all databases'); $node->issues_sql_like( - [ 'reindexdb', '-a', '-t', 'test1' ], + [ 'reindexdb', '--all', '--table' => 'test1' ], qr/statement: REINDEX TABLE public\.test1/s, 'reindex table in all databases'); @@ -43,13 +43,13 @@ $node->safe_psql( CREATE DATABASE regression_invalid; UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid'; )); -$node->command_ok([ 'reindexdb', '-a' ], - 'invalid database not targeted by reindexdb -a'); +$node->command_ok([ 'reindexdb', '--all' ], + 'invalid database not targeted by reindexdb --all'); # Doesn't quite belong here, but don't want to waste time by creating an # invalid database in 090_reindexdb.pl as well. 
$node->command_fails_like( - [ 'reindexdb', '-d', 'regression_invalid' ], + [ 'reindexdb', '--dbname' => 'regression_invalid' ], qr/FATAL: cannot connect to invalid database "regression_invalid"/, 'reindexdb cannot target invalid database'); diff --git a/src/bin/scripts/t/100_vacuumdb.pl b/src/bin/scripts/t/100_vacuumdb.pl index ccb7711af43..2d174df9aae 100644 --- a/src/bin/scripts/t/100_vacuumdb.pl +++ b/src/bin/scripts/t/100_vacuumdb.pl @@ -80,11 +80,11 @@ $node->command_fails( [ 'vacuumdb', '--analyze-only', '--no-process-toast', 'postgres' ], '--analyze-only and --no-process-toast specified together'); $node->issues_sql_like( - [ 'vacuumdb', '-P', 2, 'postgres' ], + [ 'vacuumdb', '--parallel' => 2, 'postgres' ], qr/statement: VACUUM \(SKIP_DATABASE_STATS, PARALLEL 2\).*;/, 'vacuumdb -P 2'); $node->issues_sql_like( - [ 'vacuumdb', '-P', 0, 'postgres' ], + [ 'vacuumdb', '--parallel' => 0, 'postgres' ], qr/statement: VACUUM \(SKIP_DATABASE_STATS, PARALLEL 0\).*;/, 'vacuumdb -P 0'); $node->command_ok([qw(vacuumdb -Z --table=pg_am dbname=template1)], @@ -118,94 +118,123 @@ $node->command_ok([qw|vacuumdb -Z --table="need""q(uot"(")x") postgres|], 'column list'); $node->command_fails( - [ 'vacuumdb', '--analyze', '--table', 'vactable(c)', 'postgres' ], + [ 'vacuumdb', '--analyze', '--table' => 'vactable(c)', 'postgres' ], 'incorrect column name with ANALYZE'); -$node->command_fails([ 'vacuumdb', '-P', -1, 'postgres' ], +$node->command_fails([ 'vacuumdb', '--parallel' => -1, 'postgres' ], 'negative parallel degree'); $node->issues_sql_like( - [ 'vacuumdb', '--analyze', '--table', 'vactable(a, b)', 'postgres' ], + [ 'vacuumdb', '--analyze', '--table' => 'vactable(a, b)', 'postgres' ], qr/statement: VACUUM \(SKIP_DATABASE_STATS, ANALYZE\) public.vactable\(a, b\);/, 'vacuumdb --analyze with complete column list'); $node->issues_sql_like( - [ 'vacuumdb', '--analyze-only', '--table', 'vactable(b)', 'postgres' ], + [ 'vacuumdb', '--analyze-only', '--table' => 'vactable(b)', 
'postgres' ], qr/statement: ANALYZE public.vactable\(b\);/, 'vacuumdb --analyze-only with partial column list'); $node->command_checks_all( - [ 'vacuumdb', '--analyze', '--table', 'vacview', 'postgres' ], + [ 'vacuumdb', '--analyze', '--table' => 'vacview', 'postgres' ], 0, [qr/^.*vacuuming database "postgres"/], [qr/^WARNING.*cannot vacuum non-tables or special system tables/s], 'vacuumdb with view'); $node->command_fails( - [ 'vacuumdb', '--table', 'vactable', '--min-mxid-age', '0', 'postgres' ], + [ + 'vacuumdb', + '--table' => 'vactable', + '--min-mxid-age' => '0', + 'postgres' + ], 'vacuumdb --min-mxid-age with incorrect value'); $node->command_fails( - [ 'vacuumdb', '--table', 'vactable', '--min-xid-age', '0', 'postgres' ], + [ + 'vacuumdb', + '--table' => 'vactable', + '--min-xid-age' => '0', + 'postgres' + ], 'vacuumdb --min-xid-age with incorrect value'); $node->issues_sql_like( [ - 'vacuumdb', '--table', 'vactable', '--min-mxid-age', - '2147483000', 'postgres' + 'vacuumdb', + '--table' => 'vactable', + '--min-mxid-age' => '2147483000', + 'postgres' ], qr/GREATEST.*relminmxid.*2147483000/, 'vacuumdb --table --min-mxid-age'); $node->issues_sql_like( - [ 'vacuumdb', '--min-xid-age', '2147483001', 'postgres' ], + [ 'vacuumdb', '--min-xid-age' => '2147483001', 'postgres' ], qr/GREATEST.*relfrozenxid.*2147483001/, 'vacuumdb --table --min-xid-age'); $node->issues_sql_like( - [ 'vacuumdb', '--schema', '"Foo"', 'postgres' ], + [ 'vacuumdb', '--schema' => '"Foo"', 'postgres' ], qr/VACUUM \(SKIP_DATABASE_STATS\) "Foo".bar/, 'vacuumdb --schema'); $node->issues_sql_like( - [ 'vacuumdb', '--schema', '"Foo"', '--schema', '"Bar"', 'postgres' ], + [ 'vacuumdb', '--schema' => '"Foo"', '--schema' => '"Bar"', 'postgres' ], qr/VACUUM\ \(SKIP_DATABASE_STATS\)\ "Foo".bar .*VACUUM\ \(SKIP_DATABASE_STATS\)\ "Bar".baz /sx, 'vacuumdb multiple --schema switches'); $node->issues_sql_like( - [ 'vacuumdb', '--exclude-schema', '"Foo"', 'postgres' ], + [ 'vacuumdb', '--exclude-schema' => 
'"Foo"', 'postgres' ], qr/^(?!.*VACUUM \(SKIP_DATABASE_STATS\) "Foo".bar).*$/s, 'vacuumdb --exclude-schema'); $node->issues_sql_like( [ - 'vacuumdb', '--exclude-schema', '"Foo"', '--exclude-schema', - '"Bar"', 'postgres' + 'vacuumdb', + '--exclude-schema' => '"Foo"', + '--exclude-schema' => '"Bar"', + 'postgres' ], qr/^(?!.*VACUUM\ \(SKIP_DATABASE_STATS\)\ "Foo".bar | VACUUM\ \(SKIP_DATABASE_STATS\)\ "Bar".baz).*$/sx, 'vacuumdb multiple --exclude-schema switches'); $node->command_fails_like( - [ 'vacuumdb', '-N', 'pg_catalog', '-t', 'pg_class', 'postgres', ], + [ + 'vacuumdb', + '--exclude-schema' => 'pg_catalog', + '--table' => 'pg_class', + 'postgres', + ], qr/cannot vacuum specific table\(s\) and exclude schema\(s\) at the same time/, - 'cannot use options -N and -t at the same time'); + 'cannot use options --exclude-schema and --table at the same time'); $node->command_fails_like( - [ 'vacuumdb', '-n', 'pg_catalog', '-t', 'pg_class', 'postgres' ], + [ + 'vacuumdb', + '--schema' => 'pg_catalog', + '--table' => 'pg_class', + 'postgres' + ], qr/cannot vacuum all tables in schema\(s\) and specific table\(s\) at the same time/, - 'cannot use options -n and -t at the same time'); + 'cannot use options --schema and --table at the same time'); $node->command_fails_like( - [ 'vacuumdb', '-n', 'pg_catalog', '-N', '"Foo"', 'postgres' ], + [ + 'vacuumdb', + '--schema' => 'pg_catalog', + '--exclude-schema' => '"Foo"', + 'postgres' + ], qr/cannot vacuum all tables in schema\(s\) and exclude schema\(s\) at the same time/, - 'cannot use options -n and -N at the same time'); + 'cannot use options --schema and --exclude-schema at the same time'); $node->issues_sql_like( - [ 'vacuumdb', '-a', '-N', 'pg_catalog' ], + [ 'vacuumdb', '--all', '--exclude-schema' => 'pg_catalog' ], qr/(?:(?!VACUUM \(SKIP_DATABASE_STATS\) pg_catalog.pg_class).)*/, - 'vacuumdb -a -N'); + 'vacuumdb --all --exclude-schema'); $node->issues_sql_like( - [ 'vacuumdb', '-a', '-n', 'pg_catalog' ], + [ 
'vacuumdb', '--all', '--schema' => 'pg_catalog' ], qr/VACUUM \(SKIP_DATABASE_STATS\) pg_catalog.pg_class/, - 'vacuumdb -a -n'); + 'vacuumdb --all --schema'); $node->issues_sql_like( - [ 'vacuumdb', '-a', '-t', 'pg_class' ], + [ 'vacuumdb', '--all', '--table' => 'pg_class' ], qr/VACUUM \(SKIP_DATABASE_STATS\) pg_catalog.pg_class/, - 'vacuumdb -a -t'); + 'vacuumdb --all --table'); $node->command_fails_like( - [ 'vacuumdb', '-a', '-d', 'postgres' ], + [ 'vacuumdb', '--all', '--dbname' => 'postgres' ], qr/cannot vacuum all databases and a specific one at the same time/, - 'cannot use options -a and -d at the same time'); + 'cannot use options --all and --dbname at the same time'); $node->command_fails_like( - [ 'vacuumdb', '-a', 'postgres' ], + [ 'vacuumdb', '--all', 'postgres' ], qr/cannot vacuum all databases and a specific one at the same time/, - 'cannot use option -a and a dbname as argument at the same time'); + 'cannot use option --all and a dbname as argument at the same time'); done_testing(); diff --git a/src/bin/scripts/t/101_vacuumdb_all.pl b/src/bin/scripts/t/101_vacuumdb_all.pl index cfdf00c323c..74cb22dc341 100644 --- a/src/bin/scripts/t/101_vacuumdb_all.pl +++ b/src/bin/scripts/t/101_vacuumdb_all.pl @@ -12,7 +12,7 @@ $node->init; $node->start; $node->issues_sql_like( - [ 'vacuumdb', '-a' ], + [ 'vacuumdb', '--all' ], qr/statement: VACUUM.*statement: VACUUM/s, 'vacuum all databases'); @@ -21,13 +21,13 @@ $node->safe_psql( qq( CREATE DATABASE regression_invalid; UPDATE pg_database SET datconnlimit = -2 WHERE datname = 'regression_invalid'; )); -$node->command_ok([ 'vacuumdb', '-a' ], +$node->command_ok([ 'vacuumdb', '--all' ], 'invalid database not targeted by vacuumdb -a'); # Doesn't quite belong here, but don't want to waste time by creating an # invalid database in 010_vacuumdb.pl as well. 
$node->command_fails_like( - [ 'vacuumdb', '-d', 'regression_invalid' ], + [ 'vacuumdb', '--dbname' => 'regression_invalid' ], qr/FATAL: cannot connect to invalid database "regression_invalid"/, 'vacuumdb cannot target invalid database'); diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl index c97adf17185..ee57d234c86 100644 --- a/src/test/recovery/t/001_stream_rep.pl +++ b/src/test/recovery/t/001_stream_rep.pl @@ -566,8 +566,11 @@ my $connstr = $node_primary->connstr('postgres') . " replication=database"; # a replication command and a SQL command. $node_primary->command_fails_like( [ - 'psql', '-X', '-c', "SELECT pg_backup_start('backup', true)", - '-c', 'BASE_BACKUP', '-d', $connstr + 'psql', + '--no-psqlrc', + '--command' => "SELECT pg_backup_start('backup', true)", + '--command' => 'BASE_BACKUP', + '--dbname' => $connstr ], qr/a backup is already in progress in this session/, 'BASE_BACKUP cannot run in session already running backup'); diff --git a/src/test/recovery/t/003_recovery_targets.pl b/src/test/recovery/t/003_recovery_targets.pl index a0e8ffdea40..0ae2e982727 100644 --- a/src/test/recovery/t/003_recovery_targets.pl +++ b/src/test/recovery/t/003_recovery_targets.pl @@ -57,7 +57,7 @@ $node_primary->init(has_archiving => 1, allows_streaming => 1); # Bump the transaction ID epoch. This is useful to stress the portability # of recovery_target_xid parsing. 
-system_or_bail('pg_resetwal', '--epoch', '1', $node_primary->data_dir); +system_or_bail('pg_resetwal', '--epoch' => '1', $node_primary->data_dir); # Start it $node_primary->start; @@ -147,8 +147,10 @@ recovery_target_time = '$recovery_time'"); my $res = run_log( [ - 'pg_ctl', '-D', $node_standby->data_dir, '-l', - $node_standby->logfile, 'start' + 'pg_ctl', + '--pgdata' => $node_standby->data_dir, + '--log' => $node_standby->logfile, + 'start', ]); ok(!$res, 'invalid recovery startup fails'); @@ -168,8 +170,10 @@ $node_standby->append_conf('postgresql.conf', run_log( [ - 'pg_ctl', '-D', $node_standby->data_dir, '-l', - $node_standby->logfile, 'start' + 'pg_ctl', + '--pgdata' => $node_standby->data_dir, + '--log' => $node_standby->logfile, + 'start', ]); # wait for postgres to terminate diff --git a/src/test/recovery/t/017_shm.pl b/src/test/recovery/t/017_shm.pl index fa712fcb4b8..e2e85d471fe 100644 --- a/src/test/recovery/t/017_shm.pl +++ b/src/test/recovery/t/017_shm.pl @@ -160,13 +160,13 @@ my $pre_existing_msg = qr/pre-existing shared memory block/; like(slurp_file($gnat->logfile), $pre_existing_msg, 'detected live backend via shared memory'); # Reject single-user startup. -my $single_stderr; -ok( !run_log( - [ 'postgres', '--single', '-D', $gnat->data_dir, 'template1' ], - '<', \undef, '2>', \$single_stderr), - 'live query blocks --single'); -print STDERR $single_stderr; -like($single_stderr, $pre_existing_msg, +command_fails_like( + [ + 'postgres', '--single', + '-D' => $gnat->data_dir, + 'template1' + ], + $pre_existing_msg, 'single-user mode detected live backend via shared memory'); log_ipcs(); diff --git a/src/test/recovery/t/024_archive_recovery.pl b/src/test/recovery/t/024_archive_recovery.pl index 5f8054b5376..b4527ec0843 100644 --- a/src/test/recovery/t/024_archive_recovery.pl +++ b/src/test/recovery/t/024_archive_recovery.pl @@ -76,9 +76,10 @@ sub test_recovery_wal_level_minimal # that the server ends with an error during recovery. 
run_log( [ - 'pg_ctl', '-D', - $recovery_node->data_dir, '-l', - $recovery_node->logfile, 'start' + 'pg_ctl', + '--pgdata' => $recovery_node->data_dir, + '--log' => $recovery_node->logfile, + 'start', ]); # wait for postgres to terminate diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl index 467113b1379..bab7b28084b 100644 --- a/src/test/recovery/t/027_stream_regress.pl +++ b/src/test/recovery/t/027_stream_regress.pl @@ -105,19 +105,23 @@ $node_primary->wait_for_replay_catchup($node_standby_1); # Perform a logical dump of primary and standby, and check that they match command_ok( [ - 'pg_dumpall', '-f', $outputdir . '/primary.dump', - '--no-sync', '-p', $node_primary->port, - '--no-unlogged-table-data' # if unlogged, standby has schema only + 'pg_dumpall', + '--file' => $outputdir . '/primary.dump', + '--no-sync', + '--port' => $node_primary->port, + '--no-unlogged-table-data', # if unlogged, standby has schema only ], 'dump primary server'); command_ok( [ - 'pg_dumpall', '-f', $outputdir . '/standby.dump', - '--no-sync', '-p', $node_standby_1->port + 'pg_dumpall', + '--file' => $outputdir . '/standby.dump', + '--no-sync', + '--port' => $node_standby_1->port, ], 'dump standby server'); command_ok( - [ 'diff', $outputdir . '/primary.dump', $outputdir . '/standby.dump' ], + [ 'diff', $outputdir . '/primary.dump', $outputdir . '/standby.dump', ], 'compare primary and standby dumps'); # Likewise for the catalogs of the regression database, after disabling @@ -128,29 +132,29 @@ $node_primary->wait_for_replay_catchup($node_standby_1); command_ok( [ 'pg_dump', - ('--schema', 'pg_catalog'), - ('-f', $outputdir . '/catalogs_primary.dump'), + '--schema' => 'pg_catalog', + '--file' => $outputdir . 
'/catalogs_primary.dump', '--no-sync', - ('-p', $node_primary->port), + '--port' => $node_primary->port, '--no-unlogged-table-data', - 'regression' + 'regression', ], 'dump catalogs of primary server'); command_ok( [ 'pg_dump', - ('--schema', 'pg_catalog'), - ('-f', $outputdir . '/catalogs_standby.dump'), + '--schema' => 'pg_catalog', + '--file' => $outputdir . '/catalogs_standby.dump', '--no-sync', - ('-p', $node_standby_1->port), - 'regression' + '--port' => $node_standby_1->port, + 'regression', ], 'dump catalogs of standby server'); command_ok( [ 'diff', $outputdir . '/catalogs_primary.dump', - $outputdir . '/catalogs_standby.dump' + $outputdir . '/catalogs_standby.dump', ], 'compare primary and standby catalog dumps'); diff --git a/src/test/ssl/t/001_ssltests.pl b/src/test/ssl/t/001_ssltests.pl index c4a8dbb0015..5422511d4ab 100644 --- a/src/test/ssl/t/001_ssltests.pl +++ b/src/test/ssl/t/001_ssltests.pl @@ -543,12 +543,14 @@ $node->connect_fails( # pg_stat_ssl command_like( [ - 'psql', '-X', - '-A', '-F', - ',', '-P', - 'null=_null_', '-d', - "$common_connstr sslrootcert=invalid", '-c', - "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()" + 'psql', + '--no-psqlrc', + '--no-align', + '--field-separator' => ',', + '--pset' => 'null=_null_', + '--dbname' => "$common_connstr sslrootcert=invalid", + '--command' => + "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()" ], qr{^pid,ssl,version,cipher,bits,client_dn,client_serial,issuer_dn\r?\n ^\d+,t,TLSv[\d.]+,[\w-]+,\d+,_null_,_null_,_null_\r?$}mx, @@ -742,17 +744,15 @@ else command_like( [ 'psql', - '-X', - '-A', - '-F', - ',', - '-P', - 'null=_null_', - '-d', - "$common_connstr user=ssltestuser sslcert=ssl/client.crt " + '--no-psqlrc', + '--no-align', + '--field-separator' => ',', + '--pset' => 'null=_null_', + '--dbname' => + "$common_connstr user=ssltestuser sslcert=ssl/client.crt " . 
sslkey('client.key'), - '-c', - "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()" + '--command' => + "SELECT * FROM pg_stat_ssl WHERE pid = pg_backend_pid()" ], qr{^pid,ssl,version,cipher,bits,client_dn,client_serial,issuer_dn\r?\n ^\d+,t,TLSv[\d.]+,[\w-]+,\d+,/?CN=ssltestuser,$serialno,/?\QCN=Test CA for PostgreSQL SSL regression test client certs\E\r?$}mx,