Diffstat (limited to 'src/test/recovery')
-rw-r--r--   src/test/recovery/t/001_stream_rep.pl         |  3
-rw-r--r--   src/test/recovery/t/002_archiving.pl          |  2
-rw-r--r--   src/test/recovery/t/006_logical_decoding.pl   | 80
-rw-r--r--   src/test/recovery/t/013_crash_restart.pl      | 20
-rw-r--r--   src/test/recovery/t/014_unlogged_reinit.pl    |  3
-rw-r--r--   src/test/recovery/t/019_replslot_limit.pl     | 10
-rw-r--r--   src/test/recovery/t/022_crash_temp_files.pl   | 26
-rw-r--r--   src/test/recovery/t/027_stream_regress.pl     | 54
-rw-r--r--   src/test/recovery/t/029_stats_restart.pl      |  3
-rw-r--r--   src/test/recovery/t/031_recovery_conflict.pl  |  9
-rw-r--r--   src/test/recovery/t/032_relfilenode_reuse.pl  | 82
11 files changed, 170 insertions, 122 deletions
diff --git a/src/test/recovery/t/001_stream_rep.pl b/src/test/recovery/t/001_stream_rep.pl
index 583ee87da8..86864098f9 100644
--- a/src/test/recovery/t/001_stream_rep.pl
+++ b/src/test/recovery/t/001_stream_rep.pl
@@ -374,7 +374,8 @@ sub replay_check
     );
     my $primary_lsn = $node_primary->lsn('write');
     $node_primary->wait_for_catchup($node_standby_1, 'replay', $primary_lsn);
-    $node_standby_1->wait_for_catchup($node_standby_2, 'replay', $primary_lsn);
+    $node_standby_1->wait_for_catchup($node_standby_2, 'replay',
+        $primary_lsn);
 
     $node_standby_1->safe_psql('postgres',
         qq[SELECT 1 FROM replayed WHERE val = $newval])
diff --git a/src/test/recovery/t/002_archiving.pl b/src/test/recovery/t/002_archiving.pl
index 01c52d8e7f..d69da4e5ef 100644
--- a/src/test/recovery/t/002_archiving.pl
+++ b/src/test/recovery/t/002_archiving.pl
@@ -125,7 +125,7 @@ my $log_location = -s $node_standby2->logfile;
 $node_standby2->promote;
 
 # Check the logs of the standby to see that the commands have failed.
-my $log_contents = slurp_file($node_standby2->logfile, $log_location);
+my $log_contents = slurp_file($node_standby2->logfile, $log_location);
 my $node_standby2_data = $node_standby2->data_dir;
 like(
diff --git a/src/test/recovery/t/006_logical_decoding.pl b/src/test/recovery/t/006_logical_decoding.pl
index 3ccced2ea2..0cd0467fbb 100644
--- a/src/test/recovery/t/006_logical_decoding.pl
+++ b/src/test/recovery/t/006_logical_decoding.pl
@@ -206,62 +206,68 @@ my $stats_test_slot2 = 'logical_slot';
 # Test that reset works for pg_stat_replication_slots
 
 # Stats exist for stats test slot 1
-is($node_primary->safe_psql(
-    'postgres',
-    qq(SELECT total_bytes > 0, stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t|t), qq(Total bytes is > 0 and stats_reset is NULL for slot '$stats_test_slot1'.));
+is( $node_primary->safe_psql(
+        'postgres',
+        qq(SELECT total_bytes > 0, stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+    ),
+    qq(t|t),
+    qq(Total bytes is > 0 and stats_reset is NULL for slot '$stats_test_slot1'.)
+);
 
 # Do reset of stats for stats test slot 1
-$node_primary->safe_psql(
-    'postgres',
-    qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1'))
-);
+$node_primary->safe_psql('postgres',
+    qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1')));
 
 # Get reset value after reset
-my $reset1 = $node_primary->safe_psql(
-    'postgres',
+my $reset1 = $node_primary->safe_psql('postgres',
     qq(SELECT stats_reset FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
 );
 
 # Do reset again
-$node_primary->safe_psql(
-    'postgres',
-    qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1'))
-);
+$node_primary->safe_psql('postgres',
+    qq(SELECT pg_stat_reset_replication_slot('$stats_test_slot1')));
 
-is($node_primary->safe_psql(
-    'postgres',
-    qq(SELECT stats_reset > '$reset1'::timestamptz, total_bytes = 0 FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t|t), qq(Check that reset timestamp is later after the second reset of stats for slot '$stats_test_slot1' and confirm total_bytes was set to 0.));
+is( $node_primary->safe_psql(
+        'postgres',
+        qq(SELECT stats_reset > '$reset1'::timestamptz, total_bytes = 0 FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+    ),
+    qq(t|t),
+    qq(Check that reset timestamp is later after the second reset of stats for slot '$stats_test_slot1' and confirm total_bytes was set to 0.)
+);
 
 # Check that test slot 2 has NULL in reset timestamp
-is($node_primary->safe_psql(
-    'postgres',
-    qq(SELECT stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
-), qq(t), qq(Stats_reset is NULL for slot '$stats_test_slot2' before reset.));
+is( $node_primary->safe_psql(
+        'postgres',
+        qq(SELECT stats_reset IS NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
+    ),
+    qq(t),
+    qq(Stats_reset is NULL for slot '$stats_test_slot2' before reset.));
 
 # Get reset value again for test slot 1
-$reset1 = $node_primary->safe_psql(
-    'postgres',
+$reset1 = $node_primary->safe_psql('postgres',
     qq(SELECT stats_reset FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
 );
 
 # Reset stats for all replication slots
-$node_primary->safe_psql(
-    'postgres',
-    qq(SELECT pg_stat_reset_replication_slot(NULL))
-);
+$node_primary->safe_psql('postgres',
+    qq(SELECT pg_stat_reset_replication_slot(NULL)));
 
 # Check that test slot 2 reset timestamp is no longer NULL after reset
-is($node_primary->safe_psql(
-    'postgres',
-    qq(SELECT stats_reset IS NOT NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
-), qq(t), qq(Stats_reset is not NULL for slot '$stats_test_slot2' after reset all.));
-
-is($node_primary->safe_psql(
-    'postgres',
-    qq(SELECT stats_reset > '$reset1'::timestamptz FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
-), qq(t), qq(Check that reset timestamp is later after resetting stats for slot '$stats_test_slot1' again.));
+is( $node_primary->safe_psql(
+        'postgres',
+        qq(SELECT stats_reset IS NOT NULL FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot2')
+    ),
+    qq(t),
+    qq(Stats_reset is not NULL for slot '$stats_test_slot2' after reset all.)
+);
+
+is( $node_primary->safe_psql(
+        'postgres',
+        qq(SELECT stats_reset > '$reset1'::timestamptz FROM pg_stat_replication_slots WHERE slot_name = '$stats_test_slot1')
+    ),
+    qq(t),
+    qq(Check that reset timestamp is later after resetting stats for slot '$stats_test_slot1' again.)
+);
 
 # done with the node
 $node_primary->stop;
diff --git a/src/test/recovery/t/013_crash_restart.pl b/src/test/recovery/t/013_crash_restart.pl
index 10da6cb0c1..c22844d39c 100644
--- a/src/test/recovery/t/013_crash_restart.pl
+++ b/src/test/recovery/t/013_crash_restart.pl
@@ -66,7 +66,8 @@ CREATE TABLE alive(status text);
 INSERT INTO alive VALUES($$committed-before-sigquit$$);
 SELECT pg_backend_pid();
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+        $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
     'acquired pid for SIGQUIT');
 my $pid = $killme_stdout;
 chomp($pid);
@@ -78,7 +79,9 @@ $killme_stdin .= q[
 BEGIN;
 INSERT INTO alive VALUES($$in-progress-before-sigquit$$) RETURNING status;
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigquit/m),
+ok( pump_until(
+        $killme, $psql_timeout,
+        \$killme_stdout, qr/in-progress-before-sigquit/m),
     'inserted in-progress-before-sigquit');
 $killme_stdout = '';
 $killme_stderr = '';
@@ -91,7 +94,8 @@ $monitor_stdin .= q[
 SELECT $$psql-connected$$;
 SELECT pg_sleep(3600);
 ];
-ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
+ok( pump_until(
+        $monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
     'monitor connected');
 $monitor_stdout = '';
 $monitor_stderr = '';
@@ -145,7 +149,8 @@ $monitor->run();
 $killme_stdin .= q[
 SELECT pg_backend_pid();
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+        $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
     "acquired pid for SIGKILL");
 $pid = $killme_stdout;
 chomp($pid);
@@ -158,7 +163,9 @@ INSERT INTO alive VALUES($$committed-before-sigkill$$) RETURNING status;
 BEGIN;
 INSERT INTO alive VALUES($$in-progress-before-sigkill$$) RETURNING status;
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+        $killme, $psql_timeout,
+        \$killme_stdout, qr/in-progress-before-sigkill/m),
     'inserted in-progress-before-sigkill');
 $killme_stdout = '';
 $killme_stderr = '';
@@ -170,7 +177,8 @@ $monitor_stdin .= q[
 SELECT $$psql-connected$$;
 SELECT pg_sleep(3600);
 ];
-ok(pump_until($monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
+ok( pump_until(
+        $monitor, $psql_timeout, \$monitor_stdout, qr/psql-connected/m),
     'monitor connected');
 $monitor_stdout = '';
 $monitor_stderr = '';
diff --git a/src/test/recovery/t/014_unlogged_reinit.pl b/src/test/recovery/t/014_unlogged_reinit.pl
index 0dca3f69fe..72895104ed 100644
--- a/src/test/recovery/t/014_unlogged_reinit.pl
+++ b/src/test/recovery/t/014_unlogged_reinit.pl
@@ -44,7 +44,8 @@ is($node->safe_psql('postgres', "SELECT nextval('seq_unlogged')"),
 
 my $tablespaceDir = PostgreSQL::Test::Utils::tempdir;
 
-$node->safe_psql('postgres', "CREATE TABLESPACE ts1 LOCATION '$tablespaceDir'");
+$node->safe_psql('postgres',
+    "CREATE TABLESPACE ts1 LOCATION '$tablespaceDir'");
 
 $node->safe_psql('postgres',
     'CREATE UNLOGGED TABLE ts1_unlogged (id int) TABLESPACE ts1');
diff --git a/src/test/recovery/t/019_replslot_limit.pl b/src/test/recovery/t/019_replslot_limit.pl
index 5654f3b545..6bbf55c3ee 100644
--- a/src/test/recovery/t/019_replslot_limit.pl
+++ b/src/test/recovery/t/019_replslot_limit.pl
@@ -347,16 +347,18 @@ while (1)
     my ($stdout, $stderr);
 
     $senderpid = $node_primary3->safe_psql('postgres',
-        "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walsender'");
+        "SELECT pid FROM pg_stat_activity WHERE backend_type = 'walsender'");
 
     last if $senderpid =~ qr/^[0-9]+$/;
 
     diag "multiple walsenders active in iteration $i";
 
     # show information about all active connections
-    $node_primary3->psql('postgres',
-        "\\a\\t\nSELECT * FROM pg_stat_activity",
-        stdout => \$stdout, stderr => \$stderr);
+    $node_primary3->psql(
+        'postgres',
+        "\\a\\t\nSELECT * FROM pg_stat_activity",
+        stdout => \$stdout,
+        stderr => \$stderr);
     diag $stdout, $stderr;
 
     # unlikely that the problem would resolve after 15s, so give up at point
diff --git a/src/test/recovery/t/022_crash_temp_files.pl b/src/test/recovery/t/022_crash_temp_files.pl
index 24fb141785..53a55c7a8a 100644
--- a/src/test/recovery/t/022_crash_temp_files.pl
+++ b/src/test/recovery/t/022_crash_temp_files.pl
@@ -53,7 +53,8 @@ my $killme = IPC::Run::start(
 $killme_stdin .= q[
 SELECT pg_backend_pid();
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+        $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
     'acquired pid for SIGKILL');
 my $pid = $killme_stdout;
 chomp($pid);
@@ -82,7 +83,8 @@ BEGIN;
 INSERT INTO tab_crash (a) VALUES(1);
 SELECT $$insert-tuple-to-lock-next-insert$$;
 ];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+    qr/insert-tuple-to-lock-next-insert/m);
 $killme_stdout2 = '';
 $killme_stderr2 = '';
 
@@ -95,7 +97,9 @@ BEGIN;
 SELECT $$in-progress-before-sigkill$$;
 INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+        $killme, $psql_timeout,
+        \$killme_stdout, qr/in-progress-before-sigkill/m),
     'insert in-progress-before-sigkill');
 $killme_stdout = '';
 $killme_stderr = '';
@@ -117,7 +121,8 @@ END;
 $c$;
 SELECT $$insert-tuple-lock-waiting$$;
 ];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-lock-waiting/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+    qr/insert-tuple-lock-waiting/m);
 $killme_stdout2 = '';
 $killme_stderr2 = '';
 
@@ -167,7 +172,8 @@ $killme->run();
 $killme_stdin .= q[
 SELECT pg_backend_pid();
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
+ok( pump_until(
+        $killme, $psql_timeout, \$killme_stdout, qr/[[:digit:]]+[\r\n]$/m),
     'acquired pid for SIGKILL');
 $pid = $killme_stdout;
 chomp($pid);
@@ -184,7 +190,8 @@ BEGIN;
 INSERT INTO tab_crash (a) VALUES(1);
 SELECT $$insert-tuple-to-lock-next-insert$$;
 ];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-to-lock-next-insert/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+    qr/insert-tuple-to-lock-next-insert/m);
 $killme_stdout2 = '';
 $killme_stderr2 = '';
 
@@ -197,7 +204,9 @@ BEGIN;
 SELECT $$in-progress-before-sigkill$$;
 INSERT INTO tab_crash (a) SELECT i FROM generate_series(1, 5000) s(i);
 ];
-ok(pump_until($killme, $psql_timeout, \$killme_stdout, qr/in-progress-before-sigkill/m),
+ok( pump_until(
+        $killme, $psql_timeout,
+        \$killme_stdout, qr/in-progress-before-sigkill/m),
     'insert in-progress-before-sigkill');
 $killme_stdout = '';
 $killme_stderr = '';
@@ -219,7 +228,8 @@ END;
 $c$;
 SELECT $$insert-tuple-lock-waiting$$;
 ];
-pump_until($killme2, $psql_timeout, \$killme_stdout2, qr/insert-tuple-lock-waiting/m);
+pump_until($killme2, $psql_timeout, \$killme_stdout2,
+    qr/insert-tuple-lock-waiting/m);
 $killme_stdout2 = '';
 $killme_stderr2 = '';
 
diff --git a/src/test/recovery/t/027_stream_regress.pl b/src/test/recovery/t/027_stream_regress.pl
index be9799c0a4..fdb4ea0bf5 100644
--- a/src/test/recovery/t/027_stream_regress.pl
+++ b/src/test/recovery/t/027_stream_regress.pl
@@ -19,7 +19,8 @@ $node_primary->init(allows_streaming => 1);
 
 # Increase some settings that Cluster->new makes too low by default.
 $node_primary->adjust_conf('postgresql.conf', 'max_connections', '25');
-$node_primary->append_conf('postgresql.conf', 'max_prepared_transactions = 10');
+$node_primary->append_conf('postgresql.conf',
+    'max_prepared_transactions = 10');
 # We'll stick with Cluster->new's small default shared_buffers, but since that
 # makes synchronized seqscans more probable, it risks changing the results of
 # some test queries. Disable synchronized seqscans to prevent that.
@@ -27,18 +28,19 @@ $node_primary->append_conf('postgresql.conf', 'synchronize_seqscans = off');
 
 # WAL consistency checking is resource intensive so require opt-in with the
 # PG_TEST_EXTRA environment variable.
-if ($ENV{PG_TEST_EXTRA} &&
-    $ENV{PG_TEST_EXTRA} =~ m/\bwal_consistency_checking\b/) {
+if ( $ENV{PG_TEST_EXTRA}
+    && $ENV{PG_TEST_EXTRA} =~ m/\bwal_consistency_checking\b/)
+{
     $node_primary->append_conf('postgresql.conf',
         'wal_consistency_checking = all');
 }
 $node_primary->start;
 
 is( $node_primary->psql(
-    'postgres',
-    qq[SELECT pg_create_physical_replication_slot('standby_1');]),
-    0,
-    'physical slot created on primary');
+        'postgres',
+        qq[SELECT pg_create_physical_replication_slot('standby_1');]),
+    0,
+    'physical slot created on primary');
 my $backup_name = 'my_backup';
 
 # Take backup
@@ -49,25 +51,29 @@ my $node_standby_1 = PostgreSQL::Test::Cluster->new('standby_1');
 $node_standby_1->init_from_backup($node_primary, $backup_name,
     has_streaming => 1);
 $node_standby_1->append_conf('postgresql.conf',
-    "primary_slot_name = standby_1");
+    "primary_slot_name = standby_1");
 $node_standby_1->append_conf('postgresql.conf',
     'max_standby_streaming_delay = 600s');
 $node_standby_1->start;
 
-my $dlpath = dirname($ENV{REGRESS_SHLIB});
+my $dlpath = dirname($ENV{REGRESS_SHLIB});
 my $outputdir = $PostgreSQL::Test::Utils::tmp_check;
 
 # Run the regression tests against the primary.
 my $extra_opts = $ENV{EXTRA_REGRESS_OPTS} || "";
-my $rc = system($ENV{PG_REGRESS} . " $extra_opts " .
-    "--dlpath=\"$dlpath\" " .
-    "--bindir= " .
-    "--host=" . $node_primary->host . " " .
-    "--port=" . $node_primary->port . " " .
-    "--schedule=../regress/parallel_schedule " .
-    "--max-concurrent-tests=20 " .
-    "--inputdir=../regress " .
-    "--outputdir=\"$outputdir\"");
+my $rc =
+  system($ENV{PG_REGRESS}
+      . " $extra_opts "
+      . "--dlpath=\"$dlpath\" "
+      . "--bindir= "
+      . "--host="
+      . $node_primary->host . " "
+      . "--port="
+      . $node_primary->port . " "
+      . "--schedule=../regress/parallel_schedule "
+      . "--max-concurrent-tests=20 "
+      . "--inputdir=../regress "
+      . "--outputdir=\"$outputdir\"");
 if ($rc != 0)
 {
     # Dump out the regression diffs file, if there is one
@@ -92,12 +98,16 @@ $node_primary->wait_for_catchup($node_standby_1, 'replay',
 
 # Perform a logical dump of primary and standby, and check that they match
 command_ok(
-    [ 'pg_dumpall', '-f', $outputdir . '/primary.dump', '--no-sync',
-      '-p', $node_primary->port ],
+    [
+        'pg_dumpall', '-f', $outputdir . '/primary.dump',
+        '--no-sync', '-p', $node_primary->port
+    ],
     'dump primary server');
 command_ok(
-    [ 'pg_dumpall', '-f', $outputdir . '/standby.dump', '--no-sync',
-      '-p', $node_standby_1->port ],
+    [
+        'pg_dumpall', '-f', $outputdir . '/standby.dump',
+        '--no-sync', '-p', $node_standby_1->port
+    ],
     'dump standby server');
 command_ok(
     [ 'diff', $outputdir . '/primary.dump', $outputdir . '/standby.dump' ],
diff --git a/src/test/recovery/t/029_stats_restart.pl b/src/test/recovery/t/029_stats_restart.pl
index 2fe8db8807..1bf7b568cc 100644
--- a/src/test/recovery/t/029_stats_restart.pl
+++ b/src/test/recovery/t/029_stats_restart.pl
@@ -273,7 +273,8 @@ $sect = "post immediate restart";
 my $wal_restart_immediate = wal_stats();
 
 cmp_ok(
-    $wal_reset_restart->{reset}, 'lt',
+    $wal_reset_restart->{reset},
+    'lt',
     $wal_restart_immediate->{reset},
     "$sect: reset timestamp is new");
 
diff --git a/src/test/recovery/t/031_recovery_conflict.pl b/src/test/recovery/t/031_recovery_conflict.pl
index 8dcb3da0de..545d523edf 100644
--- a/src/test/recovery/t/031_recovery_conflict.pl
+++ b/src/test/recovery/t/031_recovery_conflict.pl
@@ -229,8 +229,10 @@ $expected_conflicts++;
 # Want to test recovery deadlock conflicts, not buffer pin conflicts. Without
 # changing max_standby_streaming_delay it'd be timing dependent what we hit
 # first
-$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay',
-    "${PostgreSQL::Test::Utils::timeout_default}s");
+$node_standby->adjust_conf(
+    'postgresql.conf',
+    'max_standby_streaming_delay',
+    "${PostgreSQL::Test::Utils::timeout_default}s");
 $node_standby->restart();
 reconnect_and_clear();
 
@@ -289,7 +291,8 @@ check_conflict_stat("deadlock");
 
 # clean up for next tests
 $node_primary->safe_psql($test_db, qq[ROLLBACK PREPARED 'lock';]);
-$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay', '50ms');
+$node_standby->adjust_conf('postgresql.conf', 'max_standby_streaming_delay',
+    '50ms');
 $node_standby->restart();
 reconnect_and_clear();
 
diff --git a/src/test/recovery/t/032_relfilenode_reuse.pl b/src/test/recovery/t/032_relfilenode_reuse.pl
index ac9340b7dd..ae7e32763f 100644
--- a/src/test/recovery/t/032_relfilenode_reuse.pl
+++ b/src/test/recovery/t/032_relfilenode_reuse.pl
@@ -8,7 +8,8 @@ use File::Basename;
 
 my $node_primary = PostgreSQL::Test::Cluster->new('primary');
 $node_primary->init(allows_streaming => 1);
-$node_primary->append_conf('postgresql.conf', q[
+$node_primary->append_conf(
+    'postgresql.conf', q[
 allow_in_place_tablespaces = true
 log_connections=on
 # to avoid "repairing" corruption
@@ -61,28 +62,28 @@ $psql_standby{run} = IPC::Run::start(
 # rows. Using a template database + preexisting rows makes it a bit easier to
 # reproduce, because there's no cache invalidations generated.
 
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db_template OID = 50000;");
-$node_primary->safe_psql('conflict_db_template', q[
+$node_primary->safe_psql('postgres',
+    "CREATE DATABASE conflict_db_template OID = 50000;");
+$node_primary->safe_psql(
+    'conflict_db_template', q[
     CREATE TABLE large(id serial primary key, dataa text, datab text);
-    INSERT INTO large(dataa, datab) SELECT g.i::text, 1 FROM generate_series(1, 4000) g(i);]);
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
+    INSERT INTO large(dataa, datab) SELECT g.i::text, 1 FROM generate_series(1, 4000) g(i);]
+);
+$node_primary->safe_psql('postgres',
+    "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
 
-$node_primary->safe_psql('postgres', q[
+$node_primary->safe_psql(
+    'postgres', q[
     CREATE EXTENSION pg_prewarm;
     CREATE TABLE replace_sb(data text);
-    INSERT INTO replace_sb(data) SELECT random()::text FROM generate_series(1, 15000);]);
+    INSERT INTO replace_sb(data) SELECT random()::text FROM generate_series(1, 15000);]
+);
 
 $node_primary->wait_for_catchup($node_standby);
 
 # Use longrunning transactions, so that AtEOXact_SMgr doesn't close files
-send_query_and_wait(
-    \%psql_primary,
-    q[BEGIN;],
-    qr/BEGIN/m);
-send_query_and_wait(
-    \%psql_standby,
-    q[BEGIN;],
-    qr/BEGIN/m);
+send_query_and_wait(\%psql_primary, q[BEGIN;], qr/BEGIN/m);
+send_query_and_wait(\%psql_standby, q[BEGIN;], qr/BEGIN/m);
 
 # Cause lots of dirty rows in shared_buffers
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 1;");
@@ -94,10 +95,10 @@ cause_eviction(\%psql_primary, \%psql_standby);
 
 # drop and recreate database
 $node_primary->safe_psql('postgres', "DROP DATABASE conflict_db;");
-$node_primary->safe_psql('postgres', "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
+$node_primary->safe_psql('postgres',
+    "CREATE DATABASE conflict_db TEMPLATE conflict_db_template OID = 50001;");
 
-verify($node_primary, $node_standby, 1,
-    "initial contents as expected");
+verify($node_primary, $node_standby, 1, "initial contents as expected");
 
 # Again cause lots of dirty rows in shared_buffers, but use a different update
 # value so we can check everything is OK
@@ -109,17 +110,17 @@ $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 2;");
 cause_eviction(\%psql_primary, \%psql_standby);
 
 verify($node_primary, $node_standby, 2,
-    "update to reused relfilenode (due to DB oid conflict) is not lost");
+    "update to reused relfilenode (due to DB oid conflict) is not lost");
 
 $node_primary->safe_psql('conflict_db', "VACUUM FULL large;");
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 3;");
 
-verify($node_primary, $node_standby, 3,
-    "restored contents as expected");
+verify($node_primary, $node_standby, 3, "restored contents as expected");
 
 # Test for old filehandles after moving a database in / out of tablespace
-$node_primary->safe_psql('postgres', q[CREATE TABLESPACE test_tablespace LOCATION '']);
+$node_primary->safe_psql('postgres',
+    q[CREATE TABLESPACE test_tablespace LOCATION '']);
 
 # cause dirty buffers
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 4;");
@@ -127,23 +128,25 @@ $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 4;");
 cause_eviction(\%psql_primary, \%psql_standby);
 
 # move database back / forth
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE pg_default');
+$node_primary->safe_psql('postgres',
+    'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres',
+    'ALTER DATABASE conflict_db SET TABLESPACE pg_default');
 
 # cause dirty buffers
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 5;");
 cause_eviction(\%psql_primary, \%psql_standby);
 
-verify($node_primary, $node_standby, 5,
-    "post move contents as expected");
+verify($node_primary, $node_standby, 5, "post move contents as expected");
 
-$node_primary->safe_psql('postgres', 'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres',
+    'ALTER DATABASE conflict_db SET TABLESPACE test_tablespace');
 
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 7;");
 cause_eviction(\%psql_primary, \%psql_standby);
 $node_primary->safe_psql('conflict_db', "UPDATE large SET datab = 8;");
-$node_primary->safe_psql('postgres', 'DROP DATABASE conflict_db');
-$node_primary->safe_psql('postgres', 'DROP TABLESPACE test_tablespace');
+$node_primary->safe_psql('postgres', 'DROP DATABASE conflict_db');
+$node_primary->safe_psql('postgres', 'DROP TABLESPACE test_tablespace');
 
 $node_primary->safe_psql('postgres', 'REINDEX TABLE pg_database');
 
@@ -160,25 +163,28 @@ $node_standby->stop();
 
 # Make sure that there weren't crashes during shutdown
-command_like([ 'pg_controldata', $node_primary->data_dir ],
-    qr/Database cluster state:\s+shut down\n/, 'primary shut down ok');
-command_like([ 'pg_controldata', $node_standby->data_dir ],
-    qr/Database cluster state:\s+shut down in recovery\n/, 'standby shut down ok');
+command_like(
+    [ 'pg_controldata', $node_primary->data_dir ],
+    qr/Database cluster state:\s+shut down\n/,
+    'primary shut down ok');
+command_like(
+    [ 'pg_controldata', $node_standby->data_dir ],
+    qr/Database cluster state:\s+shut down in recovery\n/,
+    'standby shut down ok');
 
 done_testing();
 
 sub verify
 {
     my ($primary, $standby, $counter, $message) = @_;
-    my $query = "SELECT datab, count(*) FROM large GROUP BY 1 ORDER BY 1 LIMIT 10";
+    my $query =
+      "SELECT datab, count(*) FROM large GROUP BY 1 ORDER BY 1 LIMIT 10";
     is($primary->safe_psql('conflict_db', $query),
-        "$counter|4000",
-        "primary: $message");
+        "$counter|4000", "primary: $message");
     $primary->wait_for_catchup($standby);
     is($standby->safe_psql('conflict_db', $query),
-        "$counter|4000",
-        "standby: $message");
+        "$counter|4000", "standby: $message");
 }
 
 sub cause_eviction
