Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
13 changes: 12 additions & 1 deletion .github/workflows/tap-tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,8 @@ jobs:
NEW_INSTALL_DIR: psql_target
ENGINE_BRANCH_OLD: BABEL_2_6_STABLE__PG_14_9
EXTENSION_BRANCH_OLD: BABEL_2_6_STABLE
INSTALL_DIR_16: psql16
ENGINE_BRANCH_16: BABEL_4_6_STABLE__PG_16_9

runs-on: ubuntu-22.04
steps:
Expand Down Expand Up @@ -36,9 +38,17 @@ jobs:
sudo -E apt-get install krb5-admin-server krb5-kdc krb5-user libkrb5-dev -y -qq
shell: bash

- name: Build Modified Postgres using ${{env.ENGINE_BRANCH_16}}
id: build-modified-postgres-16
if: always() && steps.install-kerberos-dependencies.outcome == 'success'
uses: ./.github/composite-actions/build-modified-postgres
with:
engine_branch: ${{env.ENGINE_BRANCH_16}}
install_dir: ${{env.INSTALL_DIR_16}}

- name: Build Modified Postgres using ${{env.ENGINE_BRANCH_OLD}}
id: build-modified-postgres-old
if: always() && steps.install-kerberos-dependencies.outcome == 'success'
if: always() && steps.build-modified-postgres-16.outcome == 'success'
uses: ./.github/composite-actions/build-modified-postgres
with:
engine_branch: ${{env.ENGINE_BRANCH_OLD}}
Expand Down Expand Up @@ -150,6 +160,7 @@ jobs:
export PG_CONFIG=~/${{env.NEW_INSTALL_DIR}}/bin/pg_config
export PATH=/opt/mssql-tools/bin:$PATH
export oldinstall=$HOME/${{env.OLD_INSTALL_DIR}}
export installdir16=$HOME/${{env.INSTALL_DIR_16}}

cd contrib/babelfishpg_tds
make installcheck PROVE_TESTS="t/001_tdspasswd.pl t/002_tdskerberos.pl t/003_bbfextnotloaded.pl t/004_bbfdumprestore.pl"
Expand Down
37 changes: 23 additions & 14 deletions contrib/babelfishpg_tds/test/t/004_bbfdumprestore.pl
Original file line number Diff line number Diff line change
Expand Up @@ -65,20 +65,29 @@
$tsql_newnode->init_tsql('test_master', 'testdb');
$newnode->stop;

# Setup pg16 node
my $node16 =
PostgreSQL::Test::Cluster->new('node_16',
install_path => $ENV{installdir16});

my $newbindir = $newnode->config_data('--bindir');
my $oldbindir = $oldnode->config_data('--bindir');
my $bindir16 = $node16->config_data('--bindir');

# Dump global objects using pg_dumpall. Note that we
# need to use dump utilities from the new node here.
$oldnode->start;
my @dumpall_command = (
'pg_dumpall', '--database', 'testdb', '--username', 'test_master',
$bindir16 . '/pg_dumpall', '--database', 'testdb', '--username', 'test_master',
'--port', $oldnode->port, '--roles-only', '--quote-all-identifiers',
'--verbose', '--no-role-passwords', '--file', $dump1_file);
$newnode->command_ok(\@dumpall_command, 'Dump global objects.');
$node16->command_ok(\@dumpall_command, 'Dump global objects.');
# Dump Babelfish database using pg_dump.
my @dump_command = (
'pg_dump', '--username', 'test_master', '--quote-all-identifiers',
$bindir16 . '/pg_dump', '--username', 'test_master', '--quote-all-identifiers',
'--port', $oldnode->port, '--verbose', '--dbname', 'testdb',
'--file', $dump2_file);
$newnode->command_ok(\@dump_command, 'Dump Babelfish database.');
$node16->command_ok(\@dump_command, 'Dump Babelfish database.');
$oldnode->stop;

# Restore the dumped files on the new server.
Expand All @@ -88,7 +97,7 @@
# dump/restore is not yet supported.
$newnode->command_fails_like(
[
'psql',
$newbindir . '/psql',
'-d', 'testdb',
'-U', 'test_master',
'-p', $newnode->port,
Expand All @@ -101,7 +110,7 @@
# Similarly, restore of dump file should also cause a failure.
$newnode->command_fails_like(
[
'psql',
$newbindir . '/psql',
'-d', 'testdb',
'-U', 'test_master',
'-p', $newnode->port,
Expand All @@ -117,7 +126,7 @@

$oldnode->command_fails_like(
[
'psql',
$oldbindir . '/psql',
'-d', 'testdb',
'-U', 'test_master',
'-p', $oldnode->port,
Expand All @@ -129,7 +138,7 @@

$oldnode->command_fails_like(
[
'psql',
$oldbindir . '/psql',
'-d', 'testdb',
'-U', 'test_master',
'-p', $oldnode->port,
Expand Down Expand Up @@ -162,14 +171,14 @@
# Dump global objects using pg_dumpall. Note that we
# need to use dump utilities from the new node here.
@dumpall_command = (
'pg_dumpall', '--database', 'testdb', '--username', 'test_master',
$newbindir . '/pg_dumpall', '--database', 'testdb', '--username', 'test_master',
'--port', $newnode2->port, '--roles-only', '--quote-all-identifiers',
'--verbose', '--no-role-passwords', '--file', $dump3_file);
$newnode2->command_ok(\@dumpall_command, 'Dump global objects.');
# Dump Babelfish database using pg_dump. Let's dump with the custom format
# this time so that we cover pg_restore as well.
@dump_command = (
'pg_dump', '--username', 'test_master', '--quote-all-identifiers',
$newbindir . '/pg_dump', '--username', 'test_master', '--quote-all-identifiers',
'--port', $newnode2->port, '--verbose', '--dbname', 'testdb',
'--format', 'custom', '--file', $dump4_file);
$newnode2->command_ok(\@dump_command, 'Dump Babelfish database.');
Expand All @@ -182,7 +191,7 @@
# dump/restore is not yet supported.
$newnode->command_fails_like(
[
'psql',
$newbindir . '/psql',
'-d', 'testdb',
'-U', 'test_master',
'-p', $newnode->port,
Expand All @@ -195,7 +204,7 @@
# Similarly, restore of dump file should also cause a failure.
$newnode->command_fails_like(
[
'pg_restore',
$newbindir . '/pg_restore',
'-d', 'testdb',
'-U', 'test_master',
'-p', $newnode->port,
Expand All @@ -213,13 +222,13 @@

# Dump global objects using pg_dumpall.
@dumpall_command = (
'pg_dumpall', '--database', 'postgres', '--port', $newnode->port,
$newbindir . '/pg_dumpall', '--database', 'postgres', '--port', $newnode->port,
'--roles-only', '--quote-all-identifiers', '--verbose',
'--no-role-passwords', '--file', $dump1_file);
$newnode->command_ok(\@dumpall_command, 'Dump global objects.');
# Dump Babelfish database using pg_dump.
@dump_command = (
'pg_dump', '--quote-all-identifiers', '--port', $newnode->port,
$newbindir . '/pg_dump', '--quote-all-identifiers', '--port', $newnode->port,
'--verbose', '--dbname', 'postgres',
'--file', $dump2_file);
$newnode->command_ok(\@dump_command, 'Dump non-Babelfish (postgres db) database.');
Expand Down
Loading