diff --git a/.github/workflows/ami-release-nix.yml b/.github/workflows/ami-release-nix.yml new file mode 100644 index 000000000..d500af738 --- /dev/null +++ b/.github/workflows/ami-release-nix.yml @@ -0,0 +1,135 @@ +name: Release AMI Nix + +on: + push: + branches: + - develop + paths: + - '.github/workflows/ami-release-nix.yml' + - 'common-nix.vars.pkr.hcl' + workflow_dispatch: + +jobs: + build: + strategy: + matrix: + include: + - runner: arm-runner + arch: arm64 + ubuntu_release: focal + ubuntu_version: 20.04 + mcpu: neoverse-n1 + runs-on: ${{ matrix.runner }} + timeout-minutes: 150 + permissions: + contents: write + packages: write + id-token: write + + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + + - name: Run checks if triggered manually + if: ${{ github.event_name == 'workflow_dispatch' }} + # Update `ci.yaml` too if changing constraints. + run: | + SUFFIX=$(sed -E 's/postgres-version = "[0-9\.]+(.*)"/\1/g' common-nix.vars.pkr.hcl) + if [[ -z $SUFFIX ]] ; then + echo "Version must include non-numeric characters if built manually." + exit 1 + fi + + # extensions are built in nix prior to this step + # so we can just use the binaries from the nix store + # for postgres, extensions and wrappers + + - name: Build AMI stage 1 + run: | + packer init amazon-arm64-nix.pkr.hcl + GIT_SHA=${{github.sha}} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=" amazon-arm64-nix.pkr.hcl + + - name: Build AMI stage 2 + run: | + packer init stage2-nix-psql.pkr.hcl + GIT_SHA=${{github.sha}} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" stage2-nix-psql.pkr.hcl + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(sed -e 's/postgres-version = "\(.*\)"/\1/g' common-nix.vars.pkr.hcl) + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + + - name: Create nix flake revision tarball + run: | + GIT_SHA=${{github.sha}} + MAJOR_VERSION=$(echo "${{ steps.process_release_version.outputs.version }}" | cut -d. -f1) + + mkdir -p "/tmp/pg_upgrade_bin/${MAJOR_VERSION}" + echo "$GIT_SHA" >> "/tmp/pg_upgrade_bin/${MAJOR_VERSION}/nix_flake_version" + tar -czf "/tmp/pg_binaries.tar.gz" -C "/tmp/pg_upgrade_bin" . 
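+ # (illustrative comment, not part of the build logic) for a 15.x release the tarball produced above contains only ./15/nix_flake_version, i.e. `tar -tzf /tmp/pg_binaries.tar.gz` lists ./ ./15/ ./15/nix_flake_version; pg_upgrade_scripts/initiate.sh later reads that file to pin the nix flake revision it pulls the new postgres binaries from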
+ + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 staging + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.ARTIFACTS_BUCKET }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 staging + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + + #Our self hosted github runner already has permissions to publish images + #but they're limited to only that; + #so if we want s3 access we'll need to config credentials with the below steps + # (which overwrites existing perms) after the ami build + + - name: configure aws credentials - prod + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload software manifest to s3 prod + run: | + cd ansible + ansible-playbook -i localhost \ + -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + -e "internal_artifacts_bucket=${{ secrets.PROD_ARTIFACTS_BUCKET }}" \ + manifest-playbook.yml + + - name: Upload nix flake revision to s3 prod + run: | + aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + + - name: Create release + uses: softprops/action-gh-release@v1 + with: + name: ${{ steps.process_release_version.outputs.version }} + tag_name: ${{ steps.process_release_version.outputs.version }} + target_commitish: ${{github.sha}} + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Building Postgres AMI failed' + SLACK_FOOTER: '' + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --instance-ids {} diff --git a/.github/workflows/nix-build.yml b/.github/workflows/nix-build.yml index 93f6549e7..0d6336cd0 100644 --- a/.github/workflows/nix-build.yml +++ b/.github/workflows/nix-build.yml @@ -25,7 +25,11 @@ jobs: steps: - name: Check out code - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.ref || github.ref }} + fetch-depth: 0 + fetch-tags: true - name: aws-creds uses: aws-actions/configure-aws-credentials@v4 with: diff --git a/.github/workflows/nix-cache-upload.yml b/.github/workflows/nix-cache-upload.yml deleted file mode 100644 index ea7f628e9..000000000 --- a/.github/workflows/nix-cache-upload.yml +++ /dev/null @@ -1,52 +0,0 @@ -name: Nix Cache upload - -on: - push: - branches: - - main - -permissions: - contents: write - packages: write - id-token: write - -jobs: - build: - strategy: - fail-fast: false - matrix: - include: - - runner: [self-hosted, X64] - arch: amd64 - - runner: arm-runner - arch: arm64 - runs-on: ${{ matrix.runner }} - name: nix-build - steps: - - uses: 
actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 - with: - fetch-depth: 0 - - uses: DeterminateSystems/nix-installer-action@65d7c888b2778e8cf30a07a88422ccb23499bfb8 - - uses: DeterminateSystems/magic-nix-cache-action@749fc5bbc9fa49d60c2b93f6c4bc867b82e1d295 - - name: configure aws credentials for s3 - uses: aws-actions/configure-aws-credentials@v1 - with: - role-to-assume: ${{ secrets.DEV_AWS_ROLE }} - aws-region: "us-east-1" - kvm: true - extra-conf: | - system-features = kvm - - - name: write secret key - # use python so we don't interpolate the secret into the workflow logs, in case of bugs - run: | - python -c "import os; file = open('nix-secret-key', 'w'); file.write(os.environ['NIX_SIGN_SECRET_KEY']); file.close()" - env: - NIX_SIGN_SECRET_KEY: ${{ secrets.NIX_SIGN_SECRET_KEY }} - - - name: build and copy to S3 - run: | - for x in 15 16 orioledb_16; do - nix build .#psql_$x/bin -o result-$x - done - nix copy --to s3://nix-postgres-artifacts?secret-key=nix-secret-key ./result* diff --git a/.github/workflows/publish-nix-pgupgrade-scripts.yml b/.github/workflows/publish-nix-pgupgrade-scripts.yml new file mode 100644 index 000000000..7272da0df --- /dev/null +++ b/.github/workflows/publish-nix-pgupgrade-scripts.yml @@ -0,0 +1,94 @@ +name: Publish pg_upgrade_scripts + +on: + push: + branches: + - develop + - sam/nix-and-conventional-ami + paths: + - '.github/workflows/publish-pgupgrade-scripts.yml' + - 'common-nix.vars.pkr.hcl' + workflow_dispatch: + +permissions: + id-token: write + +jobs: + publish-staging: + runs-on: ubuntu-latest + + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing pg_upgrade scripts + run: | + mkdir -p /tmp/pg_upgrade_scripts + cp -r ansible/files/admin_api_scripts/pg_upgrade_scripts/* /tmp/pg_upgrade_scripts + tar -czvf /tmp/pg_upgrade_scripts.tar.gz -C /tmp/ pg_upgrade_scripts + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 staging + run: | + aws s3 cp /tmp/pg_upgrade_scripts.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade scripts failed' + SLACK_FOOTER: '' + + publish-prod: + runs-on: ubuntu-latest + if: github.ref_name == 'develop' + + steps: + - name: Checkout Repo + uses: actions/checkout@v3 + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + echo "version=$VERSION" >> "$GITHUB_OUTPUT" + + - name: Create a tarball containing pg_upgrade scripts + run: | + mkdir -p /tmp/pg_upgrade_scripts + cp -r ansible/files/admin_api_scripts/pg_upgrade_scripts/* /tmp/pg_upgrade_scripts + tar -czvf /tmp/pg_upgrade_scripts.tar.gz -C /tmp/ pg_upgrade_scripts + + - name: configure aws credentials - prod + uses: 
aws-actions/configure-aws-credentials@v1 + with: + role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Upload pg_upgrade scripts to s3 prod + run: | + aws s3 cp /tmp/pg_upgrade_scripts.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/pg_upgrade_scripts.tar.gz + + - name: Slack Notification on Failure + if: ${{ failure() }} + uses: rtCamp/action-slack-notify@v2 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + SLACK_USERNAME: 'gha-failures-notifier' + SLACK_COLOR: 'danger' + SLACK_MESSAGE: 'Publishing pg_upgrade scripts failed' + SLACK_FOOTER: '' diff --git a/.github/workflows/testinfra-nix.yml b/.github/workflows/testinfra-nix.yml new file mode 100644 index 000000000..cd6a903f6 --- /dev/null +++ b/.github/workflows/testinfra-nix.yml @@ -0,0 +1,88 @@ +name: Testinfra Integration Tests Nix + +on: + pull_request: + workflow_dispatch: + +jobs: + test-ami-nix: + strategy: + fail-fast: false + matrix: + include: + - runner: arm-runner + arch: arm64 + ubuntu_release: focal + ubuntu_version: 20.04 + mcpu: neoverse-n1 + runs-on: ${{ matrix.runner }} + timeout-minutes: 150 + permissions: + contents: write + packages: write + id-token: write + + steps: + - name: Checkout Repo + uses: actions/checkout@v4 + + - id: args + uses: mikefarah/yq@master + with: + cmd: yq 'to_entries | map(select(.value|type == "!!str")) | map(.key + "=" + .value) | join("\n")' 'ansible/vars.yml' + + - run: docker context create builders + + - uses: docker/setup-buildx-action@v3 + with: + endpoint: builders + + - name: Build AMI stage 1 + run: | + packer init amazon-arm64-nix.pkr.hcl + GIT_SHA=${{github.sha}} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=" -var "postgres-version=ci-ami-test" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" amazon-arm64-nix.pkr.hcl + + - name: Build AMI stage 2 + run: | + packer init stage2-nix-psql.pkr.hcl + GIT_SHA=${{github.sha}} + packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "postgres-version=ci-ami-test" -var "region=ap-southeast-1" -var 'ami_regions=["ap-southeast-1"]' -var "force-deregister=true" -var "git_sha=${GITHUB_SHA}" stage2-nix-psql.pkr.hcl + + - name: Run tests + timeout-minutes: 10 + run: | + # TODO: use poetry for pkg mgmt + pip3 install boto3 boto3-stubs[essential] docker ec2instanceconnectcli pytest pytest-testinfra[paramiko,docker] requests + pytest -vv -s testinfra/test_ami_nix.py + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --region ap-southeast-1 --instance-ids {} + + - name: Cleanup resources on build cancellation + if: ${{ always() }} + run: | + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:testinfra-run-id,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --region ap-southeast-1 --instance-ids {} || true + + - name: Cleanup AMIs + if: always() 
+ run: | + # Define AMI name patterns + STAGE1_AMI_NAME="supabase-postgres-ci-ami-test-stage-1" + STAGE2_AMI_NAME="supabase-postgres-ci-ami-test-nix" + + # Function to deregister AMIs by name pattern + deregister_ami_by_name() { + local ami_name_pattern=$1 + local ami_ids=$(aws ec2 describe-images --region ap-southeast-1 --owners self --filters "Name=name,Values=${ami_name_pattern}" --query 'Images[*].ImageId' --output text) + for ami_id in $ami_ids; do + echo "Deregistering AMI: $ami_id" + aws ec2 deregister-image --region ap-southeast-1 --image-id $ami_id + done + } + + # Deregister AMIs + deregister_ami_by_name "$STAGE1_AMI_NAME" + deregister_ami_by_name "$STAGE2_AMI_NAME" \ No newline at end of file diff --git a/amazon-arm64-nix.pkr.hcl b/amazon-arm64-nix.pkr.hcl new file mode 100644 index 000000000..118196473 --- /dev/null +++ b/amazon-arm64-nix.pkr.hcl @@ -0,0 +1,277 @@ +variable "ami" { + type = string + default = "ubuntu/images/hvm-ssd/ubuntu-focal-20.04-arm64-server-*" +} + +variable "profile" { + type = string + default = "${env("AWS_PROFILE")}" +} + +variable "ami_name" { + type = string + default = "supabase-postgres" +} + +variable "ami_regions" { + type = list(string) + default = ["ap-southeast-2"] +} + +variable "ansible_arguments" { + type = string + default = "--skip-tags install-postgrest,install-pgbouncer,install-supabase-internal" +} + +variable "aws_access_key" { + type = string + default = "" +} + +variable "aws_secret_key" { + type = string + default = "" +} + +variable "environment" { + type = string + default = "prod" +} + +variable "region" { + type = string +} + +variable "build-vol" { + type = string + default = "xvdc" +} + +# ccache docker image details +variable "docker_user" { + type = string + default = "" +} + +variable "docker_passwd" { + type = string + default = "" +} + +variable "docker_image" { + type = string + default = "" +} + +variable "docker_image_tag" { + type = string + default = "latest" +} + +locals { + creator = "packer" +} + +variable "postgres-version" { + type = string + default = "" +} + +variable "git-head-version" { + type = string + default = "unknown" +} + +variable "packer-execution-id" { + type = string + default = "unknown" +} + +variable "force-deregister" { + type = bool + default = false +} + +packer { + required_plugins { + amazon = { + source = "github.com/hashicorp/amazon" + version = "~> 1" + } + } +} + +# source block +source "amazon-ebssurrogate" "source" { + profile = "${var.profile}" + #access_key = "${var.aws_access_key}" + #ami_name = "${var.ami_name}-arm64-${formatdate("YYYY-MM-DD-hhmm", timestamp())}" + ami_name = "${var.ami_name}-${var.postgres-version}-stage-1" + ami_virtualization_type = "hvm" + ami_architecture = "arm64" + ami_regions = "${var.ami_regions}" + instance_type = "c6g.4xlarge" + region = "${var.region}" + #secret_key = "${var.aws_secret_key}" + force_deregister = var.force-deregister + + # Use latest official ubuntu focal ami owned by Canonical. 
+ source_ami_filter { + filters = { + virtualization-type = "hvm" + name = "${var.ami}" + root-device-type = "ebs" + } + owners = [ "099720109477" ] + most_recent = true + } + ena_support = true + launch_block_device_mappings { + device_name = "/dev/xvdf" + delete_on_termination = true + volume_size = 10 + volume_type = "gp3" + } + + launch_block_device_mappings { + device_name = "/dev/xvdh" + delete_on_termination = true + volume_size = 8 + volume_type = "gp3" + } + + launch_block_device_mappings { + device_name = "/dev/${var.build-vol}" + delete_on_termination = true + volume_size = 16 + volume_type = "gp2" + omit_from_artifact = true + } + + run_tags = { + creator = "packer" + appType = "postgres" + packerExecutionId = "${var.packer-execution-id}" + } + run_volume_tags = { + creator = "packer" + appType = "postgres" + } + snapshot_tags = { + creator = "packer" + appType = "postgres" + } + tags = { + creator = "packer" + appType = "postgres" + postgresVersion = "${var.postgres-version}-stage1" + sourceSha = "${var.git-head-version}" + } + + communicator = "ssh" + ssh_pty = true + ssh_username = "ubuntu" + ssh_timeout = "5m" + + ami_root_device { + source_device_name = "/dev/xvdf" + device_name = "/dev/xvda" + delete_on_termination = true + volume_size = 10 + volume_type = "gp2" + } + + associate_public_ip_address = true +} + +# a build block invokes sources and runs provisioning steps on them. +build { + sources = ["source.amazon-ebssurrogate.source"] + + provisioner "file" { + source = "ebssurrogate/files/sources-arm64.cfg" + destination = "/tmp/sources.list" + } + + provisioner "file" { + source = "ebssurrogate/files/ebsnvme-id" + destination = "/tmp/ebsnvme-id" + } + + provisioner "file" { + source = "ebssurrogate/files/70-ec2-nvme-devices.rules" + destination = "/tmp/70-ec2-nvme-devices.rules" + } + + provisioner "file" { + source = "ebssurrogate/scripts/chroot-bootstrap-nix.sh" + destination = "/tmp/chroot-bootstrap-nix.sh" + } + + provisioner "file" { + source = "ebssurrogate/files/cloud.cfg" + destination = "/tmp/cloud.cfg" + } + + provisioner "file" { + source = "ebssurrogate/files/vector.timer" + destination = "/tmp/vector.timer" + } + + provisioner "file" { + source = "ebssurrogate/files/apparmor_profiles" + destination = "/tmp" + } + + provisioner "file" { + source = "migrations" + destination = "/tmp" + } + + provisioner "file" { + source = "ebssurrogate/files/unit-tests" + destination = "/tmp" + } + + # Copy ansible playbook + provisioner "shell" { + inline = ["mkdir /tmp/ansible-playbook"] + } + + provisioner "file" { + source = "ansible" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "scripts" + destination = "/tmp/ansible-playbook" + } + + provisioner "file" { + source = "ansible/vars.yml" + destination = "/tmp/ansible-playbook/vars.yml" + } + + provisioner "shell" { + environment_vars = [ + "ARGS=${var.ansible_arguments}", + "DOCKER_USER=${var.docker_user}", + "DOCKER_PASSWD=${var.docker_passwd}", + "DOCKER_IMAGE=${var.docker_image}", + "DOCKER_IMAGE_TAG=${var.docker_image_tag}", + "POSTGRES_SUPABASE_VERSION=${var.postgres-version}" + ] + use_env_var_file = true + script = "ebssurrogate/scripts/surrogate-bootstrap-nix.sh" + execute_command = "sudo -S sh -c '. 
{{.EnvVarFile}} && {{.Path}}'" + start_retry_timeout = "5m" + skip_clean = true + } + + provisioner "file" { + source = "/tmp/ansible.log" + destination = "/tmp/ansible.log" + direction = "download" + } +} diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh index 1975709e7..d67d623d1 100755 --- a/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh @@ -161,12 +161,20 @@ function apply_auth_scheme_updates { function start_vacuum_analyze { echo "complete" > /tmp/pg-upgrade-status - su -c 'vacuumdb --all --analyze-in-stages' -s "$SHELL" postgres + if ! command -v nix &> /dev/null; then + su -c 'vacuumdb --all --analyze-in-stages' -s "$SHELL" postgres + else + su -c '. /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && vacuumdb --all --analyze-in-stages' -s "$SHELL" postgres + fi echo "Upgrade job completed" } trap cleanup ERR +echo "C.UTF-8 UTF-8" > /etc/locale.gen +echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen +locale-gen + if [ -z "$IS_CI" ]; then complete_pg_upgrade >> $LOG_FILE 2>&1 & else diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh index 1b7329ded..5c16682ae 100755 --- a/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh @@ -29,10 +29,13 @@ SCRIPT_DIR=$(dirname -- "$0";) source "$SCRIPT_DIR/common.sh" IS_CI=${IS_CI:-} -LOG_FILE="/var/log/pg-upgrade-initiate.log" +IS_LOCAL_UPGRADE=${IS_LOCAL_UPGRADE:-} +IS_NIX_UPGRADE=${IS_NIX_UPGRADE:-} +IS_NIX_BASED_SYSTEM="false" PGVERSION=$1 MOUNT_POINT="/data_migration" +LOG_FILE="/var/log/pg-upgrade-initiate.log" POST_UPGRADE_EXTENSION_SCRIPT="/tmp/pg_upgrade/pg_upgrade_extensions.sql" OLD_PGVERSION=$(run_sql -A -t -c "SHOW server_version;") @@ -41,6 +44,15 @@ POSTGRES_CONFIG_PATH="/etc/postgresql/postgresql.conf" PGBINOLD="/usr/lib/postgresql/bin" PGLIBOLD="/usr/lib/postgresql/lib" +PG_UPGRADE_BIN_DIR="/tmp/pg_upgrade_bin/$PGVERSION" + +if [ -L "$PGBINOLD/pg_upgrade" ]; then + BINARY_PATH=$(readlink -f "$PGBINOLD/pg_upgrade") + if [[ "$BINARY_PATH" == *"nix"* ]]; then + IS_NIX_BASED_SYSTEM="true" + fi +fi + # If upgrading from older major PG versions, disable specific extensions if [[ "$OLD_PGVERSION" =~ ^14.* ]]; then EXTENSIONS_TO_DISABLE+=("${PG14_EXTENSIONS_TO_DISABLE[@]}") @@ -107,7 +119,7 @@ cleanup() { echo "Removing SUPERUSER grant from postgres" run_sql -c "ALTER USER postgres WITH NOSUPERUSER;" - if [ -z "$IS_CI" ]; then + if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then echo "Unmounting data disk from ${MOUNT_POINT}" umount $MOUNT_POINT fi @@ -175,18 +187,50 @@ function initiate_upgrade { PGDATAOLD=$(cat "$POSTGRES_CONFIG_PATH" | grep data_directory | sed "s/data_directory = '\(.*\)'.*/\1/") PGDATANEW="$MOUNT_POINT/pgdata" - PG_UPGRADE_BIN_DIR="/tmp/pg_upgrade_bin/$PGVERSION" - PGBINNEW="$PG_UPGRADE_BIN_DIR/bin" - PGLIBNEW="$PG_UPGRADE_BIN_DIR/lib" - PGSHARENEW="$PG_UPGRADE_BIN_DIR/share" # running upgrade using at least 1 cpu core WORKERS=$(nproc | awk '{ print ($1 == 1 ? 
1 : $1 - 1) }') + + # To make nix-based upgrades work for testing, create a pg binaries tarball with the following contents: + # - nix_flake_version - a7189a68ed4ea78c1e73991b5f271043636cf074 + # Where the value is the commit hash of the nix flake that contains the binaries + + if [ -n "$IS_LOCAL_UPGRADE" ]; then + mkdir -p "$PG_UPGRADE_BIN_DIR" + mkdir -p /tmp/persistent/ + echo "a7189a68ed4ea78c1e73991b5f271043636cf074" > "$PG_UPGRADE_BIN_DIR/nix_flake_version" + tar -czf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin" . + rm -rf /tmp/pg_upgrade_bin/ + fi echo "1. Extracting pg_upgrade binaries" mkdir -p "/tmp/pg_upgrade_bin" tar zxf "/tmp/persistent/pg_upgrade_bin.tar.gz" -C "/tmp/pg_upgrade_bin" + PGSHARENEW="$PG_UPGRADE_BIN_DIR/share" + + if [ -f "$PG_UPGRADE_BIN_DIR/nix_flake_version" ]; then + IS_NIX_UPGRADE="true" + NIX_FLAKE_VERSION=$(cat "$PG_UPGRADE_BIN_DIR/nix_flake_version") + + if [ "$IS_NIX_BASED_SYSTEM" = "false" ]; then + echo "1.1. Nix is not installed; installing." + + curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \ + --extra-conf "substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com" \ + --extra-conf "trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" + fi + + echo "1.2. Installing flake revision: $NIX_FLAKE_VERSION" + # shellcheck disable=SC1091 + source /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh + PG_UPGRADE_BIN_DIR=$(nix build "github:supabase/postgres/${NIX_FLAKE_VERSION}#psql_15/bin" --no-link --print-out-paths --extra-experimental-features nix-command --extra-experimental-features flakes) + PGSHARENEW="$PG_UPGRADE_BIN_DIR/share/postgresql" + fi + + PGBINNEW="$PG_UPGRADE_BIN_DIR/bin" + PGLIBNEW="$PG_UPGRADE_BIN_DIR/lib" + # copy upgrade-specific pgsodium_getkey script into the share dir chmod +x "$SCRIPT_DIR/pgsodium_getkey.sh" mkdir -p "$PGSHARENEW/extension" @@ -220,7 +264,7 @@ function initiate_upgrade { locale-gen fi - if [ -z "$IS_CI" ]; then + if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then # awk NF==3 prints lines with exactly 3 fields, which are the block devices currently not mounted anywhere # excluding nvme0 since it is the root disk echo "5. Determining block device to mount" @@ -254,13 +298,17 @@ function initiate_upgrade { echo "8. 
Granting SUPERUSER to postgres user" run_sql -c "ALTER USER postgres WITH SUPERUSER;" - if [ -d "/usr/share/postgresql/${PGVERSION}" ]; then - mv "/usr/share/postgresql/${PGVERSION}" "/usr/share/postgresql/${PGVERSION}.bak" - fi - ln -s "$PGSHARENEW" "/usr/share/postgresql/${PGVERSION}" + if [ -z "$IS_NIX_UPGRADE" ]; then + if [ -d "/usr/share/postgresql/${PGVERSION}" ]; then + mv "/usr/share/postgresql/${PGVERSION}" "/usr/share/postgresql/${PGVERSION}.bak" + fi + + ln -s "$PGSHARENEW" "/usr/share/postgresql/${PGVERSION}" + cp --remove-destination "$PGLIBNEW"/*.control "$PGSHARENEW/extension/" + cp --remove-destination "$PGLIBNEW"/*.sql "$PGSHARENEW/extension/" - cp --remove-destination "$PGLIBNEW"/*.control "$PGSHARENEW/extension/" - cp --remove-destination "$PGLIBNEW"/*.sql "$PGSHARENEW/extension/" + export LD_LIBRARY_PATH="${PGLIBNEW}" + fi # This is a workaround for older versions of wrappers which don't have the expected # naming scheme, containing the version in their library's file name @@ -287,8 +335,6 @@ function initiate_upgrade { fi fi - export LD_LIBRARY_PATH="${PGLIBNEW}" - echo "9. Creating new data directory, initializing database" chown -R postgres:postgres "$MOUNT_POINT/" rm -rf "${PGDATANEW:?}/" @@ -308,6 +354,10 @@ function initiate_upgrade { EOF ) + if [ "$IS_NIX_BASED_SYSTEM" = "true" ]; then + UPGRADE_COMMAND=". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && $UPGRADE_COMMAND" + fi + su -c "$UPGRADE_COMMAND --check" -s "$SHELL" postgres echo "10. Stopping postgres; running pg_upgrade" @@ -324,7 +374,11 @@ EOF CI_stop_postgres fi - su -c "$UPGRADE_COMMAND" -s "$SHELL" postgres + if [ "$IS_NIX_BASED_SYSTEM" = "true" ]; then + LC_ALL=en_US.UTF-8 LC_CTYPE=en_US.UTF-8 LANGUAGE=en_US.UTF-8 LANG=en_US.UTF-8 LOCALE_ARCHIVE=/usr/lib/locale/locale-archive su -pc "$UPGRADE_COMMAND" -s "$SHELL" postgres + else + su -c "$UPGRADE_COMMAND" -s "$SHELL" postgres + fi # copying custom configurations echo "11. 
Copying custom configurations" @@ -349,7 +403,7 @@ EOF trap cleanup ERR echo "running" > /tmp/pg-upgrade-status -if [ -z "$IS_CI" ]; then +if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then initiate_upgrade >> "$LOG_FILE" 2>&1 & echo "Upgrade initiate job completed" else diff --git a/ansible/files/permission_check.py b/ansible/files/permission_check.py new file mode 100644 index 000000000..724acb10a --- /dev/null +++ b/ansible/files/permission_check.py @@ -0,0 +1,204 @@ +import subprocess +import json +import sys + +# Expected groups for each user +expected_results = { + "postgres": [ + {"groupname": "postgres", "username": "postgres"}, + {"groupname": "ssl-cert", "username": "postgres"} + ], + "ubuntu": [ + {"groupname":"ubuntu","username":"ubuntu"}, + {"groupname":"adm","username":"ubuntu"}, + {"groupname":"dialout","username":"ubuntu"}, + {"groupname":"cdrom","username":"ubuntu"}, + {"groupname":"floppy","username":"ubuntu"}, + {"groupname":"sudo","username":"ubuntu"}, + {"groupname":"audio","username":"ubuntu"}, + {"groupname":"dip","username":"ubuntu"}, + {"groupname":"video","username":"ubuntu"}, + {"groupname":"plugdev","username":"ubuntu"}, + {"groupname":"lxd","username":"ubuntu"}, + {"groupname":"netdev","username":"ubuntu"} + ], + "root": [ + {"groupname":"root","username":"root"} + ], + "daemon": [ + {"groupname":"daemon","username":"daemon"} + ], + "bin": [ + {"groupname":"bin","username":"bin"} + ], + "sys": [ + {"groupname":"sys","username":"sys"} + ], + "sync": [ + {"groupname":"nogroup","username":"sync"} + ], + "games": [ + {"groupname":"games","username":"games"} + ], + "man": [ + {"groupname":"man","username":"man"} + ], + "lp": [ + {"groupname":"lp","username":"lp"} + ], + "mail": [ + {"groupname":"mail","username":"mail"} + ], + "news": [ + {"groupname":"news","username":"news"} + ], + "uucp": [ + {"groupname":"uucp","username":"uucp"} + ], + "proxy": [ + {"groupname":"proxy","username":"proxy"} + ], + "www-data": [ + {"groupname":"www-data","username":"www-data"} + ], + "backup": [ + {"groupname":"backup","username":"backup"} + ], + "list": [ + {"groupname":"list","username":"list"} + ], + "irc": [ + {"groupname":"irc","username":"irc"} + ], + "gnats": [ + {"groupname":"gnats","username":"gnats"} + ], + "nobody": [ + {"groupname":"nogroup","username":"nobody"} + ], + "systemd-network": [ + {"groupname":"systemd-network","username":"systemd-network"} + ], + "systemd-resolve": [ + {"groupname":"systemd-resolve","username":"systemd-resolve"} + ], + "systemd-timesync": [ + {"groupname":"systemd-timesync","username":"systemd-timesync"} + ], + "messagebus": [ + {"groupname":"messagebus","username":"messagebus"} + ], + "ec2-instance-connect": [ + {"groupname":"nogroup","username":"ec2-instance-connect"} + ], + "sshd": [ + {"groupname":"nogroup","username":"sshd"} + ], + "wal-g": [ + {"groupname":"wal-g","username":"wal-g"}, + {"groupname":"postgres","username":"wal-g"} + ], + "pgbouncer": [ + {"groupname":"pgbouncer","username":"pgbouncer"}, + {"groupname":"ssl-cert","username":"pgbouncer"}, + {"groupname":"postgres","username":"pgbouncer"} + ], + "gotrue": [ + {"groupname":"gotrue","username":"gotrue"} + ], + "envoy": [ + {"groupname":"envoy","username":"envoy"} + ], + "kong": [ + {"groupname":"kong","username":"kong"} + ], + "nginx": [ + {"groupname":"nginx","username":"nginx"} + ], + "vector": [ + {"groupname":"vector","username":"vector"}, + {"groupname":"adm","username":"vector"}, + {"groupname":"systemd-journal","username":"vector"}, + 
{"groupname":"postgres","username":"vector"} + ], + "adminapi": [ + {"groupname":"adminapi","username":"adminapi"}, + {"groupname":"root","username":"adminapi"}, + {"groupname":"systemd-journal","username":"adminapi"}, + {"groupname":"admin","username":"adminapi"}, + {"groupname":"postgres","username":"adminapi"}, + {"groupname":"pgbouncer","username":"adminapi"}, + {"groupname":"wal-g","username":"adminapi"}, + {"groupname":"postgrest","username":"adminapi"}, + {"groupname":"envoy","username":"adminapi"}, + {"groupname":"kong","username":"adminapi"}, + {"groupname":"vector","username":"adminapi"} + ], + "postgrest": [ + {"groupname":"postgrest","username":"postgrest"} + ], + "tcpdump": [ + {"groupname":"tcpdump","username":"tcpdump"} + ], + "systemd-coredump": [ + {"groupname":"systemd-coredump","username":"systemd-coredump"} + ] +} +# This program depends on osquery being installed on the system +# Function to run osquery +def run_osquery(query): + process = subprocess.Popen(['osqueryi', '--json', query], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output, error = process.communicate() + return output.decode('utf-8') + +def parse_json(json_str): + try: + return json.loads(json_str) + except json.JSONDecodeError as e: + print("Error decoding JSON:", e) + sys.exit(1) + +def compare_results(username, query_result): + expected_result = expected_results.get(username) + if expected_result is None: + print(f"No expected result defined for user '{username}'") + sys.exit(1) + + if query_result == expected_result: + print(f"The query result for user '{username}' matches the expected result.") + else: + print(f"The query result for user '{username}' does not match the expected result.") + print("Expected:", expected_result) + print("Got:", query_result) + sys.exit(1) + +def check_nixbld_users(): + query = """ + SELECT u.username, g.groupname + FROM users u + JOIN user_groups ug ON u.uid = ug.uid + JOIN groups g ON ug.gid = g.gid + WHERE u.username LIKE 'nixbld%'; + """ + query_result = run_osquery(query) + parsed_result = parse_json(query_result) + + for user in parsed_result: + if user['groupname'] != 'nixbld': + print(f"User '{user['username']}' is in group '{user['groupname']}' instead of 'nixbld'.") + sys.exit(1) + + print("All nixbld users are in the 'nixbld' group.") + +# Define usernames for which you want to compare results +usernames = ["postgres", "ubuntu", "root", "daemon", "bin", "sys", "sync", "games","man","lp","mail","news","uucp","proxy","www-data","backup","list","irc","gnats","nobody","systemd-network","systemd-resolve","systemd-timesync","messagebus","ec2-instance-connect","sshd","wal-g","pgbouncer","gotrue","envoy","kong","nginx","vector","adminapi","postgrest","tcpdump","systemd-coredump"] + +# Iterate over usernames, run the query, and compare results +for username in usernames: + query = f"SELECT u.username, g.groupname FROM users u JOIN user_groups ug ON u.uid = ug.uid JOIN groups g ON ug.gid = g.gid WHERE u.username = '{username}';" + query_result = run_osquery(query) + parsed_result = parse_json(query_result) + compare_results(username, parsed_result) + +# Check if all nixbld users are in the nixbld group +check_nixbld_users() diff --git a/ansible/files/postgresql_config/postgresql.service.j2 b/ansible/files/postgresql_config/postgresql.service.j2 index 88cf62c81..8e5318cba 100644 --- a/ansible/files/postgresql_config/postgresql.service.j2 +++ b/ansible/files/postgresql_config/postgresql.service.j2 @@ -19,6 +19,7 @@ TimeoutStartSec=86400 Restart=always RestartSec=5 
OOMScoreAdjust=-1000 +EnvironmentFile=-/etc/environment.d/postgresql.env [Install] WantedBy=multi-user.target diff --git a/ansible/playbook.yml b/ansible/playbook.yml index 16a689f52..01e36e9dc 100644 --- a/ansible/playbook.yml +++ b/ansible/playbook.yml @@ -3,7 +3,6 @@ pre_tasks: - import_tasks: tasks/setup-system.yml - vars_files: - ./vars.yml @@ -14,7 +13,7 @@ dest: "00-schema.sql", } - { source: "stat_extension.sql", dest: "01-extension.sql" } - + environment: PATH: /usr/lib/postgresql/bin:{{ ansible_env.PATH }} @@ -35,70 +34,75 @@ tags: - install-pgbouncer - install-supabase-internal + when: debpkg_mode or nixpkg_mode - name: Install WAL-G import_tasks: tasks/setup-wal-g.yml + when: debpkg_mode or nixpkg_mode - name: Install Gotrue import_tasks: tasks/setup-gotrue.yml tags: - install-gotrue - install-supabase-internal - + when: debpkg_mode or nixpkg_mode + - name: Install PostgREST import_tasks: tasks/setup-postgrest.yml tags: - install-postgrest - install-supabase-internal + when: debpkg_mode or nixpkg_mode - name: Install Envoy import_tasks: tasks/setup-envoy.yml tags: - install-supabase-internal + when: debpkg_mode or nixpkg_mode - name: Install Kong import_tasks: tasks/setup-kong.yml tags: - install-supabase-internal + when: debpkg_mode or nixpkg_mode - name: Install nginx import_tasks: tasks/setup-nginx.yml tags: - install-supabase-internal + when: debpkg_mode or nixpkg_mode - name: Install Supabase specific content import_tasks: tasks/setup-supabase-internal.yml tags: - install-supabase-internal + when: debpkg_mode or nixpkg_mode - name: Fix IPv6 NDisc issues import_tasks: tasks/fix_ipv6_ndisc.yml tags: - install-supabase-internal - - - name: Start Postgres Database - systemd: - name: postgresql - state: started - when: not ebssurrogate_mode + when: debpkg_mode or nixpkg_mode - name: Start Postgres Database without Systemd become: yes become_user: postgres shell: cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start - when: ebssurrogate_mode + when: debpkg_mode - name: Adjust APT update intervals copy: src: files/apt_periodic dest: /etc/apt/apt.conf.d/10periodic - + when: debpkg_mode or nixpkg_mode + - name: Transfer init SQL files copy: src: files/{{ item.source }} dest: /tmp/{{ item.dest }} loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix - name: Execute init SQL files become: yes @@ -106,25 +110,30 @@ shell: cmd: /usr/lib/postgresql/bin/psql -f /tmp/{{ item.dest }} loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix - name: Delete SQL scripts file: path: /tmp/{{ item.dest }} state: absent loop: "{{ sql_files }}" + when: debpkg_mode or stage2_nix - name: First boot optimizations import_tasks: tasks/internal/optimizations.yml tags: - install-supabase-internal - + when: debpkg_mode or stage2_nix + - name: Finalize AMI import_tasks: tasks/finalize-ami.yml tags: - install-supabase-internal - + when: debpkg_mode or nixpkg_mode + - name: Enhance fail2ban import_tasks: tasks/setup-fail2ban.yml + when: debpkg_mode or nixpkg_mode # Install EC2 instance connect # Only for AWS images @@ -153,26 +162,47 @@ become_user: postgres shell: cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data restart -o "-c shared_preload_libraries='pg_tle'" - when: ebssurrogate_mode + when: debpkg_mode - name: Run migrations import_tasks: tasks/setup-migrations.yml tags: - migrations + when: debpkg_mode or stage2_nix - name: Stop Postgres Database without Systemd become: yes become_user: postgres shell: cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data 
stop - when: ebssurrogate_mode + when: debpkg_mode - name: Run unit tests import_tasks: tasks/test-image.yml tags: - unit-tests + when: debpkg_mode or stage2_nix - name: Collect Postgres binaries import_tasks: tasks/internal/collect-pg-binaries.yml tags: - collect-binaries + when: debpkg_mode + + - name: Install osquery from nixpkgs binary cache + become: yes + shell: | + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install nixpkgs#osquery" + when: stage2_nix + + - name: Run osquery permission checks + become: yes + shell: | + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && /usr/bin/python3 /tmp/ansible-playbook/ansible/files/permission_check.py" + when: stage2_nix + + - name: Remove osquery + become: yes + shell: | + sudo -u ubuntu bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile remove osquery" + when: stage2_nix diff --git a/ansible/tasks/finalize-ami.yml b/ansible/tasks/finalize-ami.yml index 9818f2add..7f0de3ac8 100644 --- a/ansible/tasks/finalize-ami.yml +++ b/ansible/tasks/finalize-ami.yml @@ -78,3 +78,4 @@ owner: postgres group: postgres mode: 0700 + when: debpkg_mode or stage2_nix diff --git a/ansible/tasks/internal/optimizations.yml b/ansible/tasks/internal/optimizations.yml index 895acccd9..157c35123 100644 --- a/ansible/tasks/internal/optimizations.yml +++ b/ansible/tasks/internal/optimizations.yml @@ -1,39 +1,29 @@ -- name: ensure services are stopped - community.general.snap: - name: amazon-ssm-agent - state: absent - failed_when: not ebssurrogate_mode - -- name: ensure services are stopped and disabled for first boot +- name: ensure services are stopped and disabled for first boot debian build systemd: enabled: no name: '{{ item }}' state: stopped with_items: - - snapd - postgresql - pgbouncer - fail2ban - motd-news - vector - failed_when: not ebssurrogate_mode - -- name: Remove snapd - apt: - state: absent - pkg: - - snapd - failed_when: not ebssurrogate_mode + - lvm2-monitor + when: debpkg_mode -- name: ensure services are stopped and disabled for first boot +- name: ensure services are stopped and disabled for first boot nix build systemd: enabled: no name: '{{ item }}' state: stopped - masked: yes with_items: - - lvm2-monitor - failed_when: not ebssurrogate_mode + - postgresql + - pgbouncer + - fail2ban + - motd-news + - vector + when: stage2_nix - name: disable man-db become: yes @@ -44,4 +34,4 @@ - man-db - popularity-contest - ubuntu-advantage-tools - failed_when: not ebssurrogate_mode + when: debpkg_mode or stage2_nix diff --git a/ansible/tasks/setup-docker.yml b/ansible/tasks/setup-docker.yml index 42f4f3b3f..7b37f70cc 100644 --- a/ansible/tasks/setup-docker.yml +++ b/ansible/tasks/setup-docker.yml @@ -2,6 +2,7 @@ copy: src: files/extensions/ dest: /tmp/extensions/ + when: debpkg_mode # Builtin apt module does not support wildcard for deb paths - name: Install extensions @@ -9,12 +10,16 @@ set -e apt-get update apt-get install -y --no-install-recommends /tmp/extensions/*.deb + when: debpkg_mode - name: pgsodium - determine postgres bin directory shell: pg_config --bindir register: pg_bindir_output + when: debpkg_mode + - set_fact: pg_bindir: "{{ pg_bindir_output.stdout }}" + when: debpkg_mode - name: pgsodium - set pgsodium.getkey_script become: yes @@ -23,6 +28,7 @@ state: present # script is expected to be placed by finalization tasks for different target platforms line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' + when: 
debpkg_mode # supautils - name: supautils - add supautils to session_preload_libraries @@ -31,6 +37,7 @@ path: /etc/postgresql/postgresql.conf regexp: "#session_preload_libraries = ''" replace: session_preload_libraries = 'supautils' + when: debpkg_mode or stage2_nix - name: supautils - write custom supautils.conf template: @@ -39,12 +46,14 @@ mode: 0664 owner: postgres group: postgres + when: debpkg_mode or stage2_nix - name: supautils - copy extension custom scripts copy: src: files/postgresql_extension_custom_scripts/ dest: /etc/postgresql-custom/extension-custom-scripts become: yes + when: debpkg_mode or stage2_nix - name: supautils - chown extension custom scripts file: @@ -54,6 +63,7 @@ path: /etc/postgresql-custom/extension-custom-scripts recurse: yes become: yes + when: debpkg_mode or stage2_nix - name: supautils - include /etc/postgresql-custom/supautils.conf in postgresql.conf become: yes @@ -61,8 +71,10 @@ path: /etc/postgresql/postgresql.conf regexp: "#include = '/etc/postgresql-custom/supautils.conf'" replace: "include = '/etc/postgresql-custom/supautils.conf'" + when: debpkg_mode or stage2_nix - name: Cleanup - extension packages file: path: /tmp/extensions state: absent + when: debpkg_mode diff --git a/ansible/tasks/setup-fail2ban.yml b/ansible/tasks/setup-fail2ban.yml index e1cec2d92..1f6065d32 100644 --- a/ansible/tasks/setup-fail2ban.yml +++ b/ansible/tasks/setup-fail2ban.yml @@ -5,16 +5,19 @@ path: /etc/fail2ban/jail.conf regexp: bantime = 10m replace: bantime = 3600 + when: debpkg_mode or nixpkg_mode - name: Configure journald copy: src: files/fail2ban_config/jail-ssh.conf dest: /etc/fail2ban/jail.d/sshd.local + when: debpkg_mode or nixpkg_mode - name: configure fail2ban to use nftables copy: src: files/fail2ban_config/jail.local dest: /etc/fail2ban/jail.local + when: debpkg_mode or nixpkg_mode # postgresql - name: import jail.d/postgresql.conf @@ -22,12 +25,14 @@ src: files/fail2ban_config/jail-postgresql.conf.j2 dest: /etc/fail2ban/jail.d/postgresql.conf become: yes + when: debpkg_mode or nixpkg_mode - name: import filter.d/postgresql.conf template: src: files/fail2ban_config/filter-postgresql.conf.j2 dest: /etc/fail2ban/filter.d/postgresql.conf become: yes + when: debpkg_mode or nixpkg_mode - name: create overrides dir file: @@ -36,11 +41,13 @@ group: root path: /etc/systemd/system/fail2ban.service.d mode: '0700' + when: debpkg_mode or nixpkg_mode - name: Custom systemd overrides copy: src: files/fail2ban_config/fail2ban.service.conf dest: /etc/systemd/system/fail2ban.service.d/overrides.conf + when: debpkg_mode or nixpkg_mode - name: add in supabase specific ignore filters lineinfile: @@ -56,15 +63,18 @@ become: yes tags: - install-supabase-internal + when: debpkg_mode or nixpkg_mode # Restart - name: fail2ban - restart systemd: name: fail2ban state: restarted + when: debpkg_mode or nixpkg_mode - name: fail2ban - disable service systemd: name: fail2ban enabled: no daemon_reload: yes + when: debpkg_mode or nixpkg_mode \ No newline at end of file diff --git a/ansible/tasks/setup-migrations.yml b/ansible/tasks/setup-migrations.yml index 570f7763c..6eea6844b 100644 --- a/ansible/tasks/setup-migrations.yml +++ b/ansible/tasks/setup-migrations.yml @@ -1,7 +1,7 @@ - name: Run migrate.sh script shell: ./migrate.sh register: retval - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix args: chdir: /tmp/migrations/db failed_when: retval.rc != 0 @@ -10,4 +10,4 @@ file: path: "/root/MIGRATION-AMI" state: touch - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix 
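Note on the mode flags used throughout the ansible changes in this PR: tasks are now gated on debpkg_mode (the pre-existing Debian-package build path) or on nixpkg_mode / stage2_nix (the two stages of the nix-based build), and the stage-2 tasks additionally use git_commit_sha to select the flake revision to install. A minimal sketch of a local stage-2 run, assuming the flags are supplied as extra-vars under exactly these names (the real invocation lives in the ebssurrogate bootstrap scripts, which are not shown in this hunk, so the command line below is illustrative only):

    # illustrative invocation; the flag names come from the playbook, everything else is assumed
    ansible-playbook -i localhost, -c local ansible/playbook.yml \
      -e '{"debpkg_mode": false, "nixpkg_mode": false, "stage2_nix": true}' \
      -e git_commit_sha="$(git rev-parse HEAD)"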
diff --git a/ansible/tasks/setup-postgres.yml b/ansible/tasks/setup-postgres.yml index c84142928..c1cf1983e 100644 --- a/ansible/tasks/setup-postgres.yml +++ b/ansible/tasks/setup-postgres.yml @@ -2,39 +2,95 @@ copy: src: files/postgres/ dest: /tmp/build/ + when: debpkg_mode - name: Postgres - add PPA apt_repository: repo: "deb [ trusted=yes ] file:///tmp/build ./" state: present + when: debpkg_mode - name: Postgres - install commons apt: name: postgresql-common install_recommends: no + when: debpkg_mode - name: Do not create main cluster shell: cmd: sed -ri 's/#(create_main_cluster) .*$/\1 = false/' /etc/postgresql-common/createcluster.conf + when: debpkg_mode - name: Postgres - install server apt: name: postgresql-{{ postgresql_major }}={{ postgresql_release }}-1.pgdg20.04+1 install_recommends: no + when: debpkg_mode - name: Postgres - remove PPA apt_repository: repo: "deb [ trusted=yes ] file:///tmp/build ./" state: absent + when: debpkg_mode - name: Postgres - cleanup package file: path: /tmp/build state: absent + when: debpkg_mode + +- name: install locales + apt: + name: locales + state: present + become: yes + when: stage2_nix + +- name: configure locales + shell: echo "C.UTF-8 UTF-8" > /etc/locale.gen && echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen + become: yes + when: stage2_nix + +- name: locale-gen + command: sudo locale-gen + when: stage2_nix + +- name: update-locale + command: sudo update-locale + when: stage2_nix - name: Create symlink to /usr/lib/postgresql/bin shell: cmd: ln -s /usr/lib/postgresql/{{ postgresql_major }}/bin /usr/lib/postgresql/bin + when: debpkg_mode + +- name: create ssl-cert group + group: + name: ssl-cert + state: present + when: nixpkg_mode +# the old method of installing from debian creates this group, but we must create it explicitly +# for the nix built version + +- name: create postgres group + group: + name: postgres + state: present + when: nixpkg_mode + +- name: create postgres user + shell: adduser --system --home /var/lib/postgresql --no-create-home --shell /bin/bash --group --gecos "PostgreSQL administrator" postgres + args: + executable: /bin/bash + become: yes + when: nixpkg_mode + +- name: add postgres user to ssl-cert group + shell: usermod -a -G ssl-cert postgres + args: + executable: /bin/bash + become: yes + when: nixpkg_mode - name: Create relevant directories file: @@ -47,6 +103,7 @@ - '/home/postgres' - '/var/log/postgresql' - '/var/lib/postgresql' + when: debpkg_mode or nixpkg_mode - name: Allow adminapi to write custom config file: @@ -59,6 +116,7 @@ with_items: - '/etc/postgresql' - '/etc/postgresql-custom' + when: debpkg_mode or nixpkg_mode - name: create placeholder config files file: @@ -70,6 +128,7 @@ with_items: - 'generated-optimizations.conf' - 'custom-overrides.conf' + when: debpkg_mode or nixpkg_mode # Move Postgres configuration files into /etc/postgresql # Add postgresql.conf @@ -78,6 +137,7 @@ src: files/postgresql_config/postgresql.conf.j2 dest: /etc/postgresql/postgresql.conf group: postgres + when: debpkg_mode or nixpkg_mode # Add pg_hba.conf - name: import pg_hba.conf @@ -85,6 +145,7 @@ src: files/postgresql_config/pg_hba.conf.j2 dest: /etc/postgresql/pg_hba.conf group: postgres + when: debpkg_mode or nixpkg_mode # Add pg_ident.conf - name: import pg_ident.conf @@ -92,6 +153,7 @@ src: files/postgresql_config/pg_ident.conf.j2 dest: /etc/postgresql/pg_ident.conf group: postgres + when: debpkg_mode or nixpkg_mode # Add custom config for read replicas set up - name: Move custom read-replica.conf file to 
/etc/postgresql-custom/read-replica.conf @@ -101,10 +163,17 @@ mode: 0664 owner: postgres group: postgres + when: debpkg_mode or nixpkg_mode # Install extensions before init - name: Install Postgres extensions import_tasks: tasks/setup-docker.yml + when: debpkg_mode or stage2_nix + +#stage 2 postgres tasks +- name: stage2 postgres tasks + import_tasks: tasks/stage2-setup-postgres.yml + when: stage2_nix # init DB - name: Create directory on data volume @@ -117,6 +186,7 @@ mode: 0750 with_items: - "/data/pgdata" + when: debpkg_mode or nixpkg_mode - name: Link database data_dir to data volume directory file: @@ -124,26 +194,60 @@ path: "/var/lib/postgresql/data" state: link force: yes + when: debpkg_mode or nixpkg_mode - name: Initialize the database become: yes become_user: postgres - shell: - cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" + shell: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" + vars: + ansible_command_timeout: 60 + when: debpkg_mode + +- name: Initialize the database stage2_nix + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data initdb -o "--allow-group-access" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive vars: ansible_command_timeout: 60 # Circumvents the following error: # "Timeout (12s) waiting for privilege escalation prompt" + when: stage2_nix - name: copy PG systemd unit template: src: files/postgresql_config/postgresql.service.j2 dest: /etc/systemd/system/postgresql.service + when: debpkg_mode or stage2_nix - name: copy optimizations systemd unit template: src: files/database-optimizations.service.j2 dest: /etc/systemd/system/database-optimizations.service + when: debpkg_mode or stage2_nix + +- name: Restart Postgres Database without Systemd + become: yes + become_user: postgres + shell: | + source /var/lib/postgresql/.bashrc + /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix + # Reload - name: System - systemd reload @@ -151,3 +255,33 @@ enabled: yes name: postgresql daemon_reload: yes + when: debpkg_mode or stage2_nix + +- name: Make sure .bashrc exists + file: + path: /var/lib/postgresql/.bashrc + state: touch + owner: postgres + group: postgres + when: nixpkg_mode + +- name: Add LOCALE_ARCHIVE to .bashrc + lineinfile: + dest: "/var/lib/postgresql/.bashrc" + line: 'export LOCALE_ARCHIVE=/usr/lib/locale/locale-archive' + create: yes + become: yes + when: nixpkg_mode + +- name: Add LANG items to .bashrc + lineinfile: + dest: "/var/lib/postgresql/.bashrc" + line: "{{ item }}" + loop: + - 'export LANG="en_US.UTF-8"' + - 'export LANGUAGE="en_US.UTF-8"' + - 'export LC_ALL="en_US.UTF-8"' + - 'export LANG="en_US.UTF-8"' + - 'export LC_CTYPE="en_US.UTF-8"' + become: yes + when: nixpkg_mode diff --git a/ansible/tasks/setup-system.yml b/ansible/tasks/setup-system.yml index 860d75cc2..d18c19130 100644 --- a/ansible/tasks/setup-system.yml +++ b/ansible/tasks/setup-system.yml @@ -1,6 +1,6 @@ - name: System - apt update and apt upgrade apt: update_cache=yes upgrade=yes - when: not ebssurrogate_mode + when: debpkg_mode or nixpkg_mode # SEE 
http://archive.vn/DKJjs#parameter-upgrade - name: Install required security updates @@ -8,13 +8,14 @@ pkg: - tzdata - linux-libc-dev - + when: debpkg_mode or nixpkg_mode # SEE https://github.com/georchestra/ansible/issues/55#issuecomment-588313638 # Without this, a similar error is faced - name: Install Ansible dependencies apt: pkg: - acl + when: debpkg_mode or nixpkg_mode - name: Install security tools apt: @@ -23,6 +24,7 @@ - fail2ban update_cache: yes cache_valid_time: 3600 + when: debpkg_mode or nixpkg_mode - name: Use nftables backend shell: | @@ -31,11 +33,13 @@ update-alternatives --set arptables /usr/sbin/arptables-nft update-alternatives --set ebtables /usr/sbin/ebtables-nft systemctl restart ufw + when: debpkg_mode or nixpkg_mode - name: Create Sysstat log directory file: path: /var/log/sysstat state: directory + when: debpkg_mode or nixpkg_mode - name: Install other useful tools apt: @@ -47,22 +51,26 @@ - sysstat - vim-tiny update_cache: yes + when: debpkg_mode or nixpkg_mode - name: Configure sysstat copy: src: files/sysstat.sysstat dest: /etc/sysstat/sysstat + when: debpkg_mode or nixpkg_mode - name: Configure default sysstat copy: src: files/default.sysstat dest: /etc/default/sysstat + when: debpkg_mode or nixpkg_mode - name: Adjust APT update intervals copy: src: files/apt_periodic dest: /etc/apt/apt.conf.d/10periodic + when: debpkg_mode or nixpkg_mode # Find platform architecture and set as a variable - name: finding platform architecture @@ -76,6 +84,7 @@ tags: - update - update-only + when: debpkg_mode or nixpkg_mode or stage2_nix - name: create overrides dir file: @@ -84,40 +93,48 @@ group: root path: /etc/systemd/system/systemd-resolved.service.d mode: '0700' + when: debpkg_mode or nixpkg_mode - name: Custom systemd overrides for resolved copy: src: files/systemd-resolved.conf dest: /etc/systemd/system/systemd-resolved.service.d/override.conf + when: debpkg_mode or nixpkg_mode - name: System - Create services.slice template: src: files/services.slice.j2 dest: /etc/systemd/system/services.slice - when: not ebssurrogate_mode + when: debpkg_mode or nixpkg_mode + - name: System - systemd reload systemd: daemon_reload=yes + when: debpkg_mode or nixpkg_mode - name: Configure journald copy: src: files/journald.conf dest: /etc/systemd/journald.conf + when: debpkg_mode or nixpkg_mode - name: reload systemd-journald systemd: name: systemd-journald state: restarted + when: debpkg_mode or nixpkg_mode - name: Configure logind copy: src: files/logind.conf dest: /etc/systemd/logind.conf + when: debpkg_mode or nixpkg_mode - name: reload systemd-logind systemd: name: systemd-logind state: restarted + when: debpkg_mode or nixpkg_mode - name: enable timestamps for shell history copy: @@ -127,6 +144,7 @@ mode: 0644 owner: root group: root + when: debpkg_mode or nixpkg_mode - name: set hosts file copy: @@ -137,6 +155,7 @@ mode: 0644 owner: root group: root + when: debpkg_mode or stage2_nix #Set Sysctl params for restarting the OS on oom after 10 - name: Set vm.panic_on_oom=1 @@ -145,6 +164,7 @@ value: '1' state: present reload: yes + when: debpkg_mode or nixpkg_mode - name: Set kernel.panic=10 ansible.builtin.sysctl: @@ -152,3 +172,4 @@ value: '10' state: present reload: yes + when: debpkg_mode or nixpkg_mode diff --git a/ansible/tasks/stage2-setup-postgres.yml b/ansible/tasks/stage2-setup-postgres.yml new file mode 100644 index 000000000..911e1eae5 --- /dev/null +++ b/ansible/tasks/stage2-setup-postgres.yml @@ -0,0 +1,222 @@ +# - name: Install openjdk11 for pljava from nix binary cache +# 
become: yes +# shell: | +# sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install nixpkgs#openjdk11" +# It was decided to leave pljava disabled at https://github.com/supabase/postgres/pull/690 therefore removing this task +- name: Install Postgres from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:supabase/postgres/{{ git_commit_sha }}#psql_15/bin" +#TODO (samrose) switch pg_prove sourcing to develop branch once PR is merged + when: stage2_nix + +- name: Install pg_prove from nix binary cache + become: yes + shell: | + sudo -u postgres bash -c ". /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh && nix profile install github:supabase/postgres/sam/2-stage-ami-nix#pg_prove" + when: stage2_nix + +- name: Set ownership and permissions for /etc/ssl/private + become: yes + file: + path: /etc/ssl/private + owner: root + group: postgres + mode: '0750' + when: stage2_nix + +- name: Set permissions for postgresql.env + become: yes + file: + path: /etc/environment.d/postgresql.env + owner: postgres + group: postgres + mode: '0644' + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/bin directory exists + file: + path: /usr/lib/postgresql/bin + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share directory exists + file: + path: /usr/lib/postgresql/share/postgresql + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/contrib directory exists + file: + path: /usr/lib/postgresql/share/postgresql/contrib + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/timezonesets directory exists + file: + path: /usr/lib/postgresql/share/postgresql/timezonesets + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/tsearch_data directory exists + file: + path: /usr/lib/postgresql/share/postgresql/tsearch_data + state: directory + owner: postgres + group: postgres + when: stage2_nix + +- name: Ensure /usr/lib/postgresql/share/extension directory exists + file: + path: /usr/lib/postgresql/share/postgresql/extension + state: directory + owner: postgres + group: postgres + when: stage2_nix + +# - name: Ensure /usr/lib/postgresql/share/postgresql/pljava directory exists +# file: +# path: /usr/lib/postgresql/share/postgresql/pljava +# state: directory +# owner: postgres +# group: postgres +# when: stage2_nix +# It was decided to leave pljava disabled at https://github.com/supabase/postgres/pull/690 therefore removing this task + +- name: import pgsodium_getkey script + template: + src: /tmp/ansible-playbook/ansible/files/pgsodium_getkey_readonly.sh.j2 + dest: "/usr/lib/postgresql/bin/pgsodium_getkey.sh" + owner: postgres + group: postgres + mode: 0700 + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/bin to /usr/lib/postgresql/bin + file: + src: "{{ item }}" + dest: "/usr/lib/postgresql/bin/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/bin/*" + become: yes + when: stage2_nix + +- name: Check if /usr/bin/pg_config exists + stat: + path: /usr/bin/pg_config + register: pg_config_stat + when: stage2_nix + +- name: Remove existing /usr/bin/pg_config if it is not a symlink + file: + path: /usr/bin/pg_config + 
state: absent + when: pg_config_stat.stat.exists and not pg_config_stat.stat.islnk and stage2_nix + become: yes + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/bin to /usr/bin + file: + src: "{{ item }}" + dest: "/usr/bin/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/bin/*" + become: yes + when: stage2_nix + +- name: Ensure postgres user has ownership of symlink + file: + path: "/usr/bin/{{ item | basename }}" + owner: postgres + group: postgres + with_fileglob: + - "/var/lib/postgresql/.nix-profile/bin/*" + become: yes + when: stage2_nix + +# - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/pljava to /usr/lib/postgresql/share/postgresql/pljava +# file: +# src: "{{ item }}" +# dest: "/usr/lib/postgresql/share/postgresql/pljava/{{ item | basename }}" +# state: link +# with_fileglob: +# - "/var/lib/postgresql/.nix-profile/share/pljava/*" +# become: yes +# It was decided to leave pljava disabled at https://github.com/supabase/postgres/pull/690 therefore removing this task + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql to /usr/lib/postgresql/share/postgresql + file: + src: "{{ item }}" + dest: "/usr/lib/postgresql/share/postgresql/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/share/postgresql/*" + become: yes + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/extension to /usr/lib/postgresql/share/postgresql/extension + file: + src: "{{ item }}" + dest: "/usr/lib/postgresql/share/postgresql/extension/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/share/postgresql/extension/*" + become: yes + when: stage2_nix + +- name: create destination directory + file: + path: /usr/lib/postgresql/share/postgresql/contrib/ + state: directory + recurse: yes + when: stage2_nix + +- name: Recursively create symbolic links and set permissions for the contrib/postgis-* dir + shell: > + sudo mkdir -p /usr/lib/postgresql/share/postgresql/contrib && \ + sudo find /var/lib/postgresql/.nix-profile/share/postgresql/contrib/ -mindepth 1 -type d -exec sh -c 'for dir do sudo ln -s "$dir" "/usr/lib/postgresql/share/postgresql/contrib/$(basename "$dir")"; done' sh {} + \ + && chown -R postgres:postgres "/usr/lib/postgresql/share/postgresql/contrib/" + become: yes + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/timezonesets to /usr/lib/postgresql/share/postgresql/timeszonesets + file: + src: "{{ item }}" + dest: "/usr/lib/postgresql/share/postgresql/timezonesets/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/share/postgresql/timezonesets/*" + become: yes + when: stage2_nix + +- name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data to /usr/lib/postgresql/share/postgresql/tsearch_data + file: + src: "{{ item }}" + dest: "/usr/lib/postgresql/share/postgresql/tsearch_data/{{ item | basename }}" + state: link + with_fileglob: + - "/var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data/*" + become: yes + when: stage2_nix + +- set_fact: + pg_bindir: "/usr/lib/postgresql/bin" + when: stage2_nix + +- name: pgsodium - set pgsodium.getkey_script + become: yes + lineinfile: + path: /etc/postgresql/postgresql.conf + state: present + # script is expected to be placed by finalization tasks for different target 
platforms + line: pgsodium.getkey_script= '{{ pg_bindir }}/pgsodium_getkey.sh' + when: stage2_nix diff --git a/ansible/tasks/test-image.yml b/ansible/tasks/test-image.yml index cd92a27d6..d6e8223f7 100644 --- a/ansible/tasks/test-image.yml +++ b/ansible/tasks/test-image.yml @@ -2,32 +2,61 @@ apt: pkg: - libtap-parser-sourcehandler-pgtap-perl + when: debpkg_mode - name: Temporarily disable PG Sodium references in config become: yes become_user: postgres shell: cmd: sed -i.bak -e "s/pg_net,\ pgsodium,\ timescaledb/pg_net,\ timescaledb/g" -e "s/pgsodium.getkey_script=/#pgsodium.getkey_script=/g" /etc/postgresql/postgresql.conf - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix - name: Start Postgres Database to load all extensions. become: yes become_user: postgres shell: cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start "-o -c config_file=/etc/postgresql/postgresql.conf" - when: ebssurrogate_mode + when: debpkg_mode + +- name: Stop Postgres Database in stage 2 + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix + +- name: Start Postgres Database to load all extensions. + become: yes + become_user: postgres + shell: source /var/lib/postgresql/.bashrc && /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data start "-o -c config_file=/etc/postgresql/postgresql.conf" + args: + executable: /bin/bash + environment: + LANG: en_US.UTF-8 + LANGUAGE: en_US.UTF-8 + LC_ALL: en_US.UTF-8 + LC_CTYPE: en_US.UTF-8 + LOCALE_ARCHIVE: /usr/lib/locale/locale-archive + when: stage2_nix - name: Run Unit tests (with filename unit-test-*) on Postgres Database shell: /usr/bin/pg_prove -U postgres -h localhost -d postgres -v /tmp/unit-tests/unit-test-*.sql register: retval failed_when: retval.rc != 0 - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix - name: Run migrations tests shell: /usr/bin/pg_prove -U supabase_admin -h localhost -d postgres -v tests/test.sql register: retval failed_when: retval.rc != 0 - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix args: chdir: /tmp/migrations @@ -36,11 +65,11 @@ become_user: postgres shell: cmd: mv /etc/postgresql/postgresql.conf.bak /etc/postgresql/postgresql.conf - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix - name: Reset db stats shell: /usr/lib/postgresql/bin/psql --no-password --no-psqlrc -d postgres -h localhost -U supabase_admin -c 'SELECT pg_stat_statements_reset(); SELECT pg_stat_reset();' - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix - name: remove pg_prove apt: @@ -48,10 +77,11 @@ - libtap-parser-sourcehandler-pgtap-perl state: absent autoremove: yes + when: debpkg_mode - name: Stop Postgres Database become: yes become_user: postgres shell: cmd: /usr/lib/postgresql/bin/pg_ctl -D /var/lib/postgresql/data stop - when: ebssurrogate_mode + when: debpkg_mode or stage2_nix diff --git a/common-nix.vars.pkr.hcl b/common-nix.vars.pkr.hcl new file mode 100644 index 000000000..f1d6f4089 --- /dev/null +++ b/common-nix.vars.pkr.hcl @@ -0,0 +1 @@ +postgres-version = "15.6.1.96-nix-staged" diff --git a/ebssurrogate/scripts/chroot-bootstrap-nix.sh b/ebssurrogate/scripts/chroot-bootstrap-nix.sh new file mode 100755 index 000000000..cda6bd2aa --- /dev/null +++ b/ebssurrogate/scripts/chroot-bootstrap-nix.sh 
@@ -0,0 +1,219 @@ +#!/usr/bin/env bash +# +# This script runs inside the chrooted environment. It installs grub and its +# configuration file. +# + +set -o errexit +set -o pipefail +set -o xtrace + +export DEBIAN_FRONTEND=noninteractive + +export APT_OPTIONS="-oAPT::Install-Recommends=false \ + -oAPT::Install-Suggests=false \ + -oAcquire::Languages=none" + +if [ $(dpkg --print-architecture) = "amd64" ]; +then + ARCH="amd64"; +else + ARCH="arm64"; +fi + + + +function update_install_packages { + source /etc/os-release + + # Update APT with new sources + cat /etc/apt/sources.list + apt-get $APT_OPTIONS update && apt-get $APT_OPTIONS --yes dist-upgrade + + # Do not configure grub during package install + if [ "${ARCH}" = "amd64" ]; then + echo 'grub-pc grub-pc/install_devices_empty select true' | debconf-set-selections + echo 'grub-pc grub-pc/install_devices select' | debconf-set-selections + # Install various packages needed for a booting system + apt-get install -y \ + linux-aws \ + grub-pc \ + e2fsprogs + else + apt-get install -y e2fsprogs + fi + # Install standard packages + apt-get install -y \ + sudo \ + wget \ + cloud-init \ + acpid \ + ec2-hibinit-agent \ + ec2-instance-connect \ + hibagent \ + ncurses-term \ + ssh-import-id \ + + # apt upgrade + apt-get upgrade -y + + # Install OpenSSH and other packages + sudo add-apt-repository universe + apt-get update + apt-get install -y --no-install-recommends \ + openssh-server \ + git \ + ufw \ + cron \ + logrotate \ + fail2ban \ + locales \ + at \ + less \ + python3-systemd + + if [ "${ARCH}" = "arm64" ]; then + apt-get $APT_OPTIONS --yes install linux-aws initramfs-tools dosfstools + fi +} + +function setup_locale { +cat << EOF >> /etc/locale.gen +en_US.UTF-8 UTF-8 +EOF + +cat << EOF > /etc/default/locale +LANG="C.UTF-8" +LC_CTYPE="C.UTF-8" +EOF + locale-gen en_US.UTF-8 +} + +function setup_postgesql_env { + # Create the directory if it doesn't exist + sudo mkdir -p /etc/environment.d + + # Define the contents of the PostgreSQL environment file + cat <<EOF | sudo tee /etc/environment.d/postgresql.env >/dev/null +LOCALE_ARCHIVE=/usr/lib/locale/locale-archive +LANG="en_US.UTF-8" +LANGUAGE="en_US.UTF-8" +LC_ALL="en_US.UTF-8" +LC_CTYPE="en_US.UTF-8" +EOF +} + +function install_packages_for_build { + apt-get install -y --no-install-recommends linux-libc-dev \ + acl \ + magic-wormhole sysstat \ + build-essential libreadline-dev zlib1g-dev flex bison libxml2-dev libxslt-dev libssl-dev libsystemd-dev libpq-dev libxml2-utils uuid-dev xsltproc ssl-cert \ + gcc-10 g++-10 \ + libgeos-dev libproj-dev libgdal-dev libjson-c-dev libboost-all-dev libcgal-dev libmpfr-dev libgmp-dev cmake \ + libkrb5-dev \ + maven default-jre default-jdk \ + curl gpp apt-transport-https cmake libc++-dev libc++abi-dev libc++1 libglib2.0-dev libtinfo5 libc++abi1 ninja-build python \ + liblzo2-dev + + source /etc/os-release + + apt-get install -y --no-install-recommends llvm-11-dev clang-11 + # Mark llvm as manual to prevent auto removal + apt-mark manual libllvm11:arm64 +} + +function setup_apparmor { + apt-get install -y apparmor apparmor-utils auditd + + # Copy apparmor profiles + cp -rv /tmp/apparmor_profiles/* /etc/apparmor.d/ +} + +function setup_grub_conf_arm64 { +cat << EOF > /etc/default/grub +GRUB_DEFAULT=0 +GRUB_TIMEOUT=0 +GRUB_TIMEOUT_STYLE="hidden" +GRUB_DISTRIBUTOR="Supabase postgresql" +GRUB_CMDLINE_LINUX_DEFAULT="nomodeset console=tty1 console=ttyS0 ipv6.disable=0" +EOF +} + +# Install GRUB +function install_configure_grub { + if [ "${ARCH}" = "arm64" ]; then + apt-get $APT_OPTIONS --yes install cloud-guest-utils
fdisk grub-efi-arm64 efibootmgr + setup_grub_conf_arm64 + rm -rf /etc/grub.d/30_os-prober + sleep 1 + fi + grub-install /dev/xvdf && update-grub +} + +# skip fsck for first boot +function disable_fsck { + touch /fastboot +} + +# Don't request hostname during boot but set hostname +function setup_hostname { + sed -i 's/gethostname()/ubuntu /g' /etc/dhcp/dhclient.conf + sed -i 's/host-name,//g' /etc/dhcp/dhclient.conf + echo "ubuntu" > /etc/hostname + chmod 644 /etc/hostname +} + +# Set options for the default interface +function setup_eth0_interface { +cat << EOF > /etc/netplan/eth0.yaml +network: + version: 2 + ethernets: + eth0: + dhcp4: true +EOF +} + +function disable_sshd_passwd_auth { + sed -i -E -e 's/^#?\s*PasswordAuthentication\s+(yes|no)\s*$/PasswordAuthentication no/g' \ + -e 's/^#?\s*ChallengeResponseAuthentication\s+(yes|no)\s*$/ChallengeResponseAuthentication no/g' \ + /etc/ssh/sshd_config +} + +function create_admin_account { + groupadd admin +} + +#Set default target as multi-user +function set_default_target { + rm -f /etc/systemd/system/default.target + ln -s /lib/systemd/system/multi-user.target /etc/systemd/system/default.target +} + +# Setup ccache +function setup_ccache { + apt-get install ccache -y + mkdir -p /tmp/ccache + export PATH=/usr/lib/ccache:$PATH + echo "PATH=$PATH" >> /etc/environment +} + +# Clear apt caches +function cleanup_cache { + apt-get clean +} + +update_install_packages +setup_locale +setup_postgesql_env +#install_packages_for_build +install_configure_grub +setup_apparmor +setup_hostname +create_admin_account +set_default_target +setup_eth0_interface +disable_sshd_passwd_auth +disable_fsck +#setup_ccache +cleanup_cache diff --git a/ebssurrogate/scripts/surrogate-bootstrap-nix.sh b/ebssurrogate/scripts/surrogate-bootstrap-nix.sh new file mode 100755 index 000000000..5bb021d96 --- /dev/null +++ b/ebssurrogate/scripts/surrogate-bootstrap-nix.sh @@ -0,0 +1,324 @@ +#!/usr/bin/env bash +# +# This script creates the filesystem and sets up the chrooted +# environment for further processing. It also runs the +# ansible playbook and finally does system cleanup. +# +# Adapted from: https://github.com/jen20/packer-ubuntu-zfs + +set -o errexit +set -o pipefail +set -o xtrace + +if [ $(dpkg --print-architecture) = "amd64" ]; +then + ARCH="amd64"; +else + ARCH="arm64"; +fi + +function waitfor_boot_finished { + export DEBIAN_FRONTEND=noninteractive + + echo "args: ${ARGS}" + # Wait for cloudinit on the surrogate to complete before making progress + while [[ ! -f /var/lib/cloud/instance/boot-finished ]]; do + echo 'Waiting for cloud-init...'
+ sleep 1 + done +} + +function install_packages { + # Setup Ansible on host VM + apt-get update && sudo apt-get install software-properties-common -y + add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y + ansible-galaxy collection install community.general + + # Update apt and install required packages + apt-get update + apt-get install -y \ + gdisk \ + e2fsprogs \ + debootstrap \ + nvme-cli +} + +# Partition the new root EBS volume +function create_partition_table { + + if [ "${ARCH}" = "arm64" ]; then + parted --script /dev/xvdf \ + mklabel gpt \ + mkpart UEFI 1MiB 100MiB \ + mkpart ROOT 100MiB 100% \ + set 1 esp on \ + set 1 boot on + parted --script /dev/xvdf print + else + sgdisk -Zg -n1:0:4095 -t1:EF02 -c1:GRUB -n2:0:0 -t2:8300 -c2:EXT4 /dev/xvdf + fi + + sleep 2 +} + +function device_partition_mappings { + # NVMe EBS launch device mappings (symlinks): /dev/nvme*n* to /dev/xvd* + declare -A blkdev_mappings + for blkdev in $(nvme list | awk '/^\/dev/ { print $1 }'); do # /dev/nvme*n* + # Mapping info from disk headers + header=$(nvme id-ctrl --raw-binary "${blkdev}" | cut -c3073-3104 | tr -s ' ' | sed 's/ $//g' | sed 's!/dev/!!') + mapping="/dev/${header%%[0-9]}" # normalize sda1 => sda + + # Create /dev/xvd* device symlink + if [[ ! -z "$mapping" ]] && [[ -b "${blkdev}" ]] && [[ ! -L "${mapping}" ]]; then + ln -s "$blkdev" "$mapping" + + blkdev_mappings["$blkdev"]="$mapping" + fi + done + + create_partition_table + + # NVMe EBS launch device partition mappings (symlinks): /dev/nvme*n*p* to /dev/xvd*[0-9]+ + declare -A partdev_mappings + for blkdev in "${!blkdev_mappings[@]}"; do # /dev/nvme*n* + mapping="${blkdev_mappings[$blkdev]}" + + # Create /dev/xvd*[0-9]+ partition device symlink + for partdev in "${blkdev}"p*; do + partnum=${partdev##*p} + if [[ ! -L "${mapping}${partnum}" ]]; then + ln -s "${blkdev}p${partnum}" "${mapping}${partnum}" + + partdev_mappings["${blkdev}p${partnum}"]="${mapping}${partnum}" + fi + done + done +} + + +#Download and install latest e2fsprogs for fast_commit feature, if required.
+function format_and_mount_rootfs { + mkfs.ext4 -m0.1 /dev/xvdf2 + + mount -o noatime,nodiratime /dev/xvdf2 /mnt + if [ "${ARCH}" = "arm64" ]; then + mkfs.fat -F32 /dev/xvdf1 + mkdir -p /mnt/boot/efi + sleep 2 + mount /dev/xvdf1 /mnt/boot/efi + fi + + mkfs.ext4 /dev/xvdh + mkdir -p /mnt/data + mount -o defaults,discard /dev/xvdh /mnt/data +} + +function create_swapfile { + fallocate -l 1G /mnt/swapfile + chmod 600 /mnt/swapfile + mkswap /mnt/swapfile +} + +function format_build_partition { + mkfs.ext4 -O ^has_journal /dev/xvdc +} +function pull_docker { + apt-get install -y docker.io + docker run -itd --name ccachedata "${DOCKER_IMAGE}:${DOCKER_IMAGE_TAG}" sh + docker exec -itd ccachedata mkdir -p /build/ccache +} + +# Create fstab +function create_fstab { + FMT="%-42s %-11s %-5s %-17s %-5s %s" +cat > "/mnt/etc/fstab" << EOF +$(printf "${FMT}" "# DEVICE UUID" "MOUNTPOINT" "TYPE" "OPTIONS" "DUMP" "FSCK") +$(findmnt -no SOURCE /mnt | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/", "ext4", "defaults,discard", "0", "1" ) }') +$(findmnt -no SOURCE /mnt/boot/efi | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/boot/efi", "vfat", "umask=0077", "0", "1" ) }') +$(findmnt -no SOURCE /mnt/data | xargs blkid -o export | awk -v FMT="${FMT}" '/^UUID=/ { printf(FMT, $0, "/data", "ext4", "defaults,discard", "0", "2" ) }') +$(printf "$FMT" "/swapfile" "none" "swap" "sw" "0" "0") +EOF + unset FMT +} + +function setup_chroot_environment { + UBUNTU_VERSION=$(lsb_release -cs) # 'focal' for Ubuntu 20.04 + + # Bootstrap Ubuntu into /mnt + debootstrap --arch ${ARCH} --variant=minbase "$UBUNTU_VERSION" /mnt + + # Update ec2-region + REGION=$(curl --silent --fail http://169.254.169.254/latest/meta-data/placement/availability-zone | sed -E 's|[a-z]+$||g') + sed -i "s/REGION/${REGION}/g" /tmp/sources.list + cp /tmp/sources.list /mnt/etc/apt/sources.list + + if [ "${ARCH}" = "arm64" ]; then + create_fstab + fi + + # Create mount points and mount the filesystem + mkdir -p /mnt/{dev,proc,sys} + mount --rbind /dev /mnt/dev + mount --rbind /proc /mnt/proc + mount --rbind /sys /mnt/sys + + # Create build mount point and mount + mkdir -p /mnt/tmp + mount /dev/xvdc /mnt/tmp + chmod 777 /mnt/tmp + + # Copy apparmor profiles + chmod 644 /tmp/apparmor_profiles/* + cp -r /tmp/apparmor_profiles /mnt/tmp/ + + # Copy migrations + cp -r /tmp/migrations /mnt/tmp/ + + # Copy unit tests + cp -r /tmp/unit-tests /mnt/tmp/ + + # Copy the bootstrap script into place and execute inside chroot + cp /tmp/chroot-bootstrap-nix.sh /mnt/tmp/chroot-bootstrap-nix.sh + chroot /mnt /tmp/chroot-bootstrap-nix.sh + rm -f /mnt/tmp/chroot-bootstrap-nix.sh + echo "${POSTGRES_SUPABASE_VERSION}" > /mnt/root/supabase-release + + # Copy the nvme identification script into /sbin inside the chroot + mkdir -p /mnt/sbin + cp /tmp/ebsnvme-id /mnt/sbin/ebsnvme-id + chmod +x /mnt/sbin/ebsnvme-id + + # Copy the udev rules for identifying nvme devices into the chroot + mkdir -p /mnt/etc/udev/rules.d + cp /tmp/70-ec2-nvme-devices.rules \ + /mnt/etc/udev/rules.d/70-ec2-nvme-devices.rules + + #Copy custom cloud-init + rm -f /mnt/etc/cloud/cloud.cfg + cp /tmp/cloud.cfg /mnt/etc/cloud/cloud.cfg + + sleep 2 +} + +function download_ccache { + docker cp ccachedata:/build/ccache/. 
/mnt/tmp/ccache +} + +function execute_playbook { + +tee /etc/ansible/ansible.cfg <logfile 2>&1 & sleep 2 @@ -551,6 +551,11 @@ nix-update pg_prove shellcheck + ansible + ansible-lint + (packer.overrideAttrs (oldAttrs: { + version = "1.7.8"; + })) basePackages.start-server basePackages.start-client diff --git a/nix/ext/postgis.nix b/nix/ext/postgis.nix index dc9b31e34..e0b6dfbeb 100644 --- a/nix/ext/postgis.nix +++ b/nix/ext/postgis.nix @@ -13,10 +13,12 @@ , libiconv , pcre2 , nixosTests +, callPackage }: let gdal = gdalMinimal; + sfcgal = callPackage ./sfcgal/sfcgal.nix { }; in stdenv.mkDerivation rec { pname = "postgis"; @@ -29,7 +31,7 @@ stdenv.mkDerivation rec { sha256 = "sha256-miohnaAFoXMKOdGVmhx87GGbHvsAm2W+gP/CW60pkGg="; }; - buildInputs = [ libxml2 postgresql geos proj gdal json_c protobufc pcre2.dev ] + buildInputs = [ libxml2 postgresql geos proj gdal json_c protobufc pcre2.dev sfcgal ] ++ lib.optional stdenv.isDarwin libiconv; nativeBuildInputs = [ perl pkg-config ]; dontDisableStatic = true; @@ -40,7 +42,7 @@ stdenv.mkDerivation rec { preConfigure = '' sed -i 's@/usr/bin/file@${file}/bin/file@' configure - configureFlags="--datadir=$out/share/postgresql --datarootdir=$out/share/postgresql --bindir=$out/bin --docdir=$doc/share/doc/${pname} --with-gdalconfig=${gdal}/bin/gdal-config --with-jsondir=${json_c.dev} --disable-extension-upgrades-install" + configureFlags="--datadir=$out/share/postgresql --datarootdir=$out/share/postgresql --bindir=$out/bin --docdir=$doc/share/doc/${pname} --with-gdalconfig=${gdal}/bin/gdal-config --with-jsondir=${json_c.dev} --disable-extension-upgrades-install --with-sfcgal" makeFlags="PERL=${perl}/bin/perl datadir=$out/share/postgresql pkglibdir=$out/lib bindir=$out/bin docdir=$doc/share/doc/${pname}" ''; @@ -79,7 +81,7 @@ stdenv.mkDerivation rec { homepage = "https://postgis.net/"; changelog = "https://git.osgeo.org/gitea/postgis/postgis/raw/tag/${version}/NEWS"; license = licenses.gpl2; - maintainers = with maintainers; teams.geospatial.members ++ [ marcweber wolfgangwalther ]; + maintainers = with maintainers; [ samrose ]; inherit (postgresql.meta) platforms; }; } diff --git a/nix/ext/sfcgal/sfcgal.nix b/nix/ext/sfcgal/sfcgal.nix new file mode 100644 index 000000000..54d7b52cb --- /dev/null +++ b/nix/ext/sfcgal/sfcgal.nix @@ -0,0 +1,31 @@ +{ lib, stdenv, fetchFromGitLab, cgal, cmake, pkg-config, gmp, mpfr, boost }: + +stdenv.mkDerivation rec { + pname = "sfcgal"; + version = "61f3b08ade49493b56c6bafa98c7c1f84addbc10"; + + src = fetchFromGitLab { + owner = "sfcgal"; + repo = "SFCGAL"; + rev = "${version}"; + hash = "sha256-nKSqiFyMkZAYptIeShb1zFg9lYSny3kcGJfxdeTFqxw="; + }; + + nativeBuildInputs = [ cmake pkg-config cgal gmp mpfr boost ]; + + cmakeFlags = [ "-DCGAL_DIR=${cgal}" "-DCMAKE_PREFIX_PATH=${cgal}" ]; + + + postPatch = '' + substituteInPlace sfcgal.pc.in \ + --replace '$'{prefix}/@CMAKE_INSTALL_LIBDIR@ @CMAKE_INSTALL_FULL_LIBDIR@ + ''; + + meta = with lib; { + description = "A wrapper around CGAL that intents to implement 2D and 3D operations on OGC standards models"; + homepage = "https://sfcgal.gitlab.io/SFCGAL/"; + license = with licenses; [ gpl3Plus lgpl3Plus]; + platforms = platforms.all; + maintainers = with maintainers; [ samrose ]; + }; +} diff --git a/nix/ext/timescaledb.nix b/nix/ext/timescaledb.nix index d5bb60423..4c3f2ef1e 100644 --- a/nix/ext/timescaledb.nix +++ b/nix/ext/timescaledb.nix @@ -11,7 +11,7 @@ stdenv.mkDerivation rec { owner = "timescale"; repo = "timescaledb"; rev = version; - hash = 
"sha256-gJViEWHtIczvIiQKuvvuwCfWJMxAYoBhCHhD75no6r0="; + hash = "sha256-fvVSxDiGZAewyuQ2vZDb0I6tmlDXl6trjZp8+qDBtb8="; }; cmakeFlags = [ "-DSEND_TELEMETRY_DEFAULT=OFF" "-DREGRESS_CHECKS=OFF" "-DTAP_CHECKS=OFF" ] diff --git a/nix/ext/wrappers/default.nix b/nix/ext/wrappers/default.nix index 845bf64b6..c68641659 100644 --- a/nix/ext/wrappers/default.nix +++ b/nix/ext/wrappers/default.nix @@ -11,14 +11,14 @@ buildPgrxExtension_0_11_3 rec { pname = "supabase-wrappers"; - version = "0.3.1"; + version = "0.4.1"; inherit postgresql; src = fetchFromGitHub { owner = "supabase"; repo = "wrappers"; rev = "v${version}"; - hash = "sha256-ZwTw0USJC/F/ZW5usX7p0CB8p2YzeUb6OLiMF3D1+J4="; + hash = "sha256-AU9Y43qEMcIBVBThu+Aor1HCtfFIg+CdkzK9IxVdkzM="; }; nativeBuildInputs = [ pkg-config cargo ]; diff --git a/nix/overlays/postgis.nix b/nix/overlays/postgis.nix deleted file mode 100644 index 8d022f564..000000000 --- a/nix/overlays/postgis.nix +++ /dev/null @@ -1,7 +0,0 @@ -final: prev: { - postgis = prev.postgresqlPackages.postgis.overrideAttrs (old: { - version = "3.3.2"; - sha256 = ""; - }); - postgresqlPackages.postgis = final.postgis; -} diff --git a/nix/tests/postgresql.conf.in b/nix/tests/postgresql.conf.in index 22d1b93fc..4c5075aa1 100644 --- a/nix/tests/postgresql.conf.in +++ b/nix/tests/postgresql.conf.in @@ -793,4 +793,3 @@ shared_preload_libraries = 'auto_explain,pgsodium' # Add settings for extensions here pgsodium.getkey_script = '@PGSODIUM_GETKEY_SCRIPT@' -pljava.libjvm_location = '@PLJAVA_LIBJVM_LOCATION@' diff --git a/nix/tests/prime.sql b/nix/tests/prime.sql index 5a0e88f58..084ad3b52 100644 --- a/nix/tests/prime.sql +++ b/nix/tests/prime.sql @@ -20,4 +20,3 @@ CREATE EXTENSION IF NOT EXISTS pg_graphql; CREATE EXTENSION IF NOT EXISTS pg_jsonschema; CREATE EXTENSION IF NOT EXISTS hypopg; CREATE EXTENSION IF NOT EXISTS index_advisor; -CREATE EXTENSION IF NOT EXISTS pljava; diff --git a/scripts/nix-provision.sh b/scripts/nix-provision.sh new file mode 100644 index 000000000..223e84926 --- /dev/null +++ b/scripts/nix-provision.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# shellcheck shell=bash + +set -o errexit +set -o pipefail +set -o xtrace + +function install_packages { + # Setup Ansible on host VM + sudo apt-get update && sudo apt-get install software-properties-common -y + sudo add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y + ansible-galaxy collection install community.general + +} + + + +function install_nix() { + sudo su -c "curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \ + --extra-conf \"substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com\" \ + --extra-conf \"trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=\" " -s /bin/bash root + . 
/nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh + +} + + +function execute_stage2_playbook { + sudo tee /etc/ansible/ansible.cfg < str: + return base64.b64encode(gzip.compress(s.encode())).decode() + + instance = list( + ec2.create_instances( + BlockDeviceMappings=[ + { + "DeviceName": "/dev/sda1", + "Ebs": { + "VolumeSize": 8, # gb + "Encrypted": True, + "DeleteOnTermination": True, + "VolumeType": "gp3", + }, + }, + ], + MetadataOptions={ + "HttpTokens": "required", + "HttpEndpoint": "enabled", + }, + IamInstanceProfile={"Name": "pg-ap-southeast-1"}, + InstanceType="t4g.micro", + MinCount=1, + MaxCount=1, + ImageId=image.id, + NetworkInterfaces=[ + { + "DeviceIndex": 0, + "AssociatePublicIpAddress": True, + "Groups": ["sg-0a883ca614ebfbae0", "sg-014d326be5a1627dc"], + } + ], + UserData=f"""#cloud-config +hostname: db-aaaaaaaaaaaaaaaaaaaa +write_files: + - {{path: /etc/postgresql.schema.sql, content: {gzip_then_base64_encode(postgresql_schema_sql_content)}, permissions: '0600', encoding: gz+b64}} + - {{path: /etc/realtime.env, content: {gzip_then_base64_encode(realtime_env_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/adminapi/adminapi.yaml, content: {gzip_then_base64_encode(adminapi_yaml_content)}, permissions: '0600', owner: 'adminapi:root', encoding: gz+b64}} + - {{path: /etc/postgresql-custom/pgsodium_root.key, content: {gzip_then_base64_encode(pgsodium_root_key_content)}, permissions: '0600', owner: 'postgres:postgres', encoding: gz+b64}} + - {{path: /etc/postgrest/base.conf, content: {gzip_then_base64_encode(postgrest_base_conf_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/gotrue.env, content: {gzip_then_base64_encode(gotrue_env_content)}, permissions: '0664', encoding: gz+b64}} + - {{path: /etc/wal-g/config.json, content: {gzip_then_base64_encode(walg_config_json_content)}, permissions: '0664', owner: 'wal-g:wal-g', encoding: gz+b64}} + - {{path: /tmp/init.json, content: {gzip_then_base64_encode(init_json_content)}, permissions: '0600', encoding: gz+b64}} +runcmd: + - 'sudo echo \"pgbouncer\" \"postgres\" >> /etc/pgbouncer/userlist.txt' + - 'cd /tmp && aws s3 cp --region ap-southeast-1 s3://init-scripts-staging/project/init.sh .' 
+ - 'bash init.sh "staging"' + - 'rm -rf /tmp/*' +""", + TagSpecifications=[ + { + "ResourceType": "instance", + "Tags": [ + {"Key": "Name", "Value": "ci-ami-test-nix"}, + {"Key": "creator", "Value": "testinfra-ci"}, + {"Key": "testinfra-run-id", "Value": RUN_ID} + ], + } + ], + ) + )[0] + instance.wait_until_running() + + ec2logger = EC2InstanceConnectLogger(debug=False) + temp_key = EC2InstanceConnectKey(ec2logger.get_logger()) + ec2ic = boto3.client("ec2-instance-connect", region_name="ap-southeast-1") + response = ec2ic.send_ssh_public_key( + InstanceId=instance.id, + InstanceOSUser="ubuntu", + SSHPublicKey=temp_key.get_pub_key(), + ) + assert response["Success"] + + # instance doesn't have public ip yet + while not instance.public_ip_address: + logger.warning("waiting for ip to be available") + sleep(5) + instance.reload() + + while True: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if sock.connect_ex((instance.public_ip_address, 22)) == 0: + break + else: + logger.warning("waiting for ssh to be available") + sleep(10) + + host = testinfra.get_host( + # paramiko is an ssh backend + f"paramiko://ubuntu@{instance.public_ip_address}?timeout=60", + ssh_identity_file=temp_key.get_priv_key_file(), + ) + + def is_healthy(host) -> bool: + cmd = host.run("sudo -u postgres /usr/bin/pg_isready -U postgres") + if cmd.failed is True: + logger.warning("pg not ready") + return False + + cmd = host.run(f"curl -sf -k --connect-timeout 30 --max-time 60 https://localhost:8085/health -H 'apikey: {supabase_admin_key}'") + if cmd.failed is True: + logger.warning("adminapi not ready") + return False + + cmd = host.run("curl -sf --connect-timeout 30 --max-time 60 http://localhost:3001/ready") + if cmd.failed is True: + logger.warning("postgrest not ready") + return False + + cmd = host.run("curl -sf --connect-timeout 30 --max-time 60 http://localhost:8081/health") + if cmd.failed is True: + logger.warning("gotrue not ready") + return False + + # TODO(thebengeu): switch to checking Envoy once it's the default. + cmd = host.run("sudo kong health") + if cmd.failed is True: + logger.warning("kong not ready") + return False + + cmd = host.run("sudo fail2ban-client status") + if cmd.failed is True: + logger.warning("fail2ban not ready") + return False + + return True + + while True: + if is_healthy(host): + break + sleep(1) + + # return a testinfra connection to the instance + yield host + + # at the end of the test suite, destroy the instance + instance.terminate() + + +def test_postgrest_is_running(host): + postgrest = host.service("postgrest") + assert postgrest.is_running + + +def test_postgrest_responds_to_requests(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/", + headers={ + "apikey": anon_key, + "authorization": f"Bearer {anon_key}", + }, + ) + assert res.ok + + +def test_postgrest_can_connect_to_db(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "apikey": service_role_key, + "authorization": f"Bearer {service_role_key}", + "accept-profile": "storage", + }, + ) + assert res.ok + + +# There would be an error if the `apikey` query parameter isn't removed, +# since PostgREST treats query parameters as conditions. +# +# Worth testing since remove_apikey_query_parameter.lua uses regexp instead +# of parsed query parameters. 
+def test_postgrest_starting_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "apikey": service_role_key, + "id": "eq.absent", + "name": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_middle_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "apikey": service_role_key, + "name": "eq.absent", + }, + ) + assert res.ok + + +def test_postgrest_ending_apikey_query_parameter_is_removed(host): + res = requests.get( + f"http://{host.backend.get_hostname()}/rest/v1/buckets", + headers={ + "accept-profile": "storage", + }, + params={ + "id": "eq.absent", + "name": "eq.absent", + "apikey": service_role_key, + }, + ) + assert res.ok
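# Illustrative sketch, not part of the patch above: the three apikey-position
# tests could equivalently be collapsed into one parametrized test. This assumes
# the same module-scoped `host` fixture, the `service_role_key` constant, and the
# `requests` import defined earlier in testinfra/test_ami_nix.py; the test name
# and the `position` parameter below are hypothetical.
import pytest
import requests

@pytest.mark.parametrize("position", [0, 1, 2], ids=["start", "middle", "end"])
def test_postgrest_apikey_query_parameter_is_removed_sketch(host, position):
    # Place apikey at the start, middle, or end of the query string; requests
    # preserves the order of a list of (key, value) tuples when encoding params.
    params = [("id", "eq.absent"), ("name", "eq.absent")]
    params.insert(position, ("apikey", service_role_key))
    res = requests.get(
        f"http://{host.backend.get_hostname()}/rest/v1/buckets",
        headers={"accept-profile": "storage"},
        params=params,
    )
    assert res.ok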