diff --git a/.github/workflows/ami-release-nix.yml b/.github/workflows/ami-release-nix.yml
index 3242a4e1d..1d57ea23b 100644
--- a/.github/workflows/ami-release-nix.yml
+++ b/.github/workflows/ami-release-nix.yml
@@ -78,6 +78,7 @@ jobs:
       run: |
         packer init amazon-arm64-nix.pkr.hcl
         GIT_SHA=${{github.sha}}
+        # TODO: postgresql_major is defined here rather than alongside the three other postgresql_* variables; consider consolidating them.
         packer build -var "git-head-version=${GIT_SHA}" -var "packer-execution-id=${GITHUB_RUN_ID}" -var-file="development-arm.vars.pkr.hcl" -var-file="common-nix.vars.pkr.hcl" -var "ansible_arguments=-e postgresql_major=${POSTGRES_MAJOR_VERSION}" amazon-arm64-nix.pkr.hcl
     - name: Build AMI stage 2
@@ -143,7 +144,7 @@ jobs:
         aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz
     - name: Create release
-      uses: softprops/action-gh-release@v1
+      uses: softprops/action-gh-release@v2
       with:
         name: ${{ steps.process_release_version.outputs.version }}
         tag_name: ${{ steps.process_release_version.outputs.version }}
diff --git a/.github/workflows/manual-docker-release.yml b/.github/workflows/manual-docker-release.yml
new file mode 100644
index 000000000..b9b66b305
--- /dev/null
+++ b/.github/workflows/manual-docker-release.yml
@@ -0,0 +1,250 @@
+name: Manual Docker Artifacts Release
+
+on:
+  workflow_dispatch:
+    inputs:
+      postgresVersion:
+        description: 'Optional. Postgres version to publish against, e.g. 15.1.1.78'
+        required: false
+
+jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    outputs:
+      matrix_config: ${{ steps.set-matrix.outputs.matrix_config }}
+    steps:
+      - uses: DeterminateSystems/nix-installer-action@main
+      - name: Checkout Repo
+        uses: actions/checkout@v3
+      - name: Generate build matrix
+        id: set-matrix
+        run: |
+          nix run nixpkgs#nushell -- -c 'let versions = (open ansible/vars.yml | get postgres_major)
+          let matrix = ($versions | each { |ver|
+            let version = ($ver | str trim)
+            let dockerfile = $"Dockerfile-($version)"
+            if ($dockerfile | path exists) {
+              {
+                version: $version,
+                dockerfile: $dockerfile
+              }
+            } else {
+              null
+            }
+          } | compact)
+
+          let matrix_config = {
+            include: $matrix
+          }
+
+          $"matrix_config=($matrix_config | to json -r)" | save --append $env.GITHUB_OUTPUT'
+  build:
+    needs: prepare
+    strategy:
+      matrix: ${{ fromJson(needs.prepare.outputs.matrix_config) }}
+    runs-on: ubuntu-latest
+    outputs:
+      build_args: ${{ steps.args.outputs.result }}
+    steps:
+      - uses: actions/checkout@v3
+      - uses: DeterminateSystems/nix-installer-action@main
+      - name: Set PostgreSQL version environment variable
+        run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.version }}" >> $GITHUB_ENV
+
+      - id: args
+        run: |
+          nix run nixpkgs#nushell -- -c '
+          open ansible/vars.yml
+          | items { |key value| {name: $key, item: $value} }
+          | where { |it| ($it.item | describe) == "string" }
+          | each { |it| $"($it.name)=($it.item)" }
+          | str join "\n"
+          | save --append $env.GITHUB_OUTPUT
+          '
+  build_release_image:
+    needs: [prepare, build]
+    strategy:
+      matrix:
+        postgres: ${{ fromJson(needs.prepare.outputs.matrix_config).include }}
+        arch: [amd64, arm64]
+    runs-on: ${{ matrix.arch == 'amd64' && 'ubuntu-latest' || 'arm-runner' }}
+    timeout-minutes: 180
+    steps:
+      - uses: actions/checkout@v3
+      - uses: DeterminateSystems/nix-installer-action@main
+      - run: docker context create builders
+      - uses: docker/setup-buildx-action@v3
+        with:
+          endpoint: builders
+      - uses: docker/login-action@v2
+        with:
+          username:
${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Get image tag + id: image + run: | + if [[ "${{ matrix.arch }}" == "arm64" ]]; then + pg_version=$(sudo nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.postgres.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let base_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + let final_version = if "${{ inputs.postgresVersion }}" != "" { + "${{ inputs.postgresVersion }}" + } else { + $base_version + } + $final_version | str trim + ') + echo "pg_version=supabase/postgres:$pg_version" >> $GITHUB_OUTPUT + else + pg_version=$(nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.postgres.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let base_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + let final_version = if "${{ inputs.postgresVersion }}" != "" { + "${{ inputs.postgresVersion }}" + } else { + $base_version + } + $final_version | str trim + ') + echo "pg_version=supabase/postgres:$pg_version" >> $GITHUB_OUTPUT + fi + - id: build + uses: docker/build-push-action@v5 + with: + push: true + build-args: | + ${{ needs.build.outputs.build_args }} + target: production + tags: ${{ steps.image.outputs.pg_version }}_${{ matrix.arch }} + platforms: linux/${{ matrix.arch }} + cache-from: type=gha,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + cache-to: type=gha,mode=max,scope=${{ github.ref_name }}-latest-${{ matrix.arch }} + file: ${{ matrix.postgres.dockerfile }} + merge_manifest: + needs: [prepare, build, build_release_image] + strategy: + matrix: + include: ${{ fromJson(needs.prepare.outputs.matrix_config).include }} + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: DeterminateSystems/nix-installer-action@main + - uses: docker/setup-buildx-action@v3 + - uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + - name: Get image tag + id: get_version + run: | + nix run nixpkgs#nushell -- -c ' + let version = "${{ matrix.version }}" + let release_key = if ($version | str contains "orioledb") { + $"postgresorioledb-17" + } else { + $"postgres($version)" + } + let pg_version = (open ansible/vars.yml | get postgres_release | get $release_key | str trim) + $"pg_version=supabase/postgres:($pg_version)" | save --append $env.GITHUB_OUTPUT + ' + - name: Output version + id: output_version + run: | + echo "result=${{ steps.get_version.outputs.pg_version }}" >> $GITHUB_OUTPUT + - name: Collect versions + id: collect_versions + run: | + echo "${{ steps.output_version.outputs.result }}" >> results.txt # Append results + - name: Upload Results Artifact + uses: actions/upload-artifact@v4 + with: + name: merge_results-${{ matrix.version }} + path: results.txt + if-no-files-found: warn + - name: Merge multi-arch manifests + run: | + docker buildx imagetools create -t ${{ steps.get_version.outputs.pg_version }} \ + ${{ steps.get_version.outputs.pg_version }}_amd64 \ + ${{ steps.get_version.outputs.pg_version }}_arm64 + combine_results: + needs: [prepare, merge_manifest] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: DeterminateSystems/nix-installer-action@main + + - name: Debug Input from Prepare + run: | + echo "Raw matrix_config output:" 
+          echo "${{ needs.prepare.outputs.matrix_config }}"
+      - name: Get Versions from Matrix Config
+        id: get_versions
+        run: |
+          nix run nixpkgs#nushell -- -c '
+          # Parse the matrix configuration directly
+          let matrix_config = (${{ toJson(needs.prepare.outputs.matrix_config) }} | from json)
+
+          # Get versions directly from include array
+          let versions = ($matrix_config.include | get version)
+
+          echo $"Versions: ($versions)"
+
+          # Convert the versions to a comma-separated string
+          let versions_str = ($versions | str join ",")
+          $"versions=($versions_str)" | save --append $env.GITHUB_ENV
+          '
+      - name: Download Results Artifacts
+        uses: actions/download-artifact@v4
+        with:
+          pattern: merge_results-*
+      - name: Combine Results
+        id: combine
+        run: |
+          nix run nixpkgs#nushell -- -c '
+          # Get all results files and process them in one go
+          let files = (ls **/results.txt | get name)
+          echo $"Found files: ($files)"
+
+          let matrix = {
+            include: (
+              $files
+              | each { |file| open $file }  # Open each file
+              | each { |content| $content | lines }  # Split into lines
+              | flatten  # Flatten the nested lists
+              | where { |line| $line != "" }  # Filter empty lines
+              | each { |line|
+                # Extract just the version part after the last colon
+                let version = ($line | parse "supabase/postgres:{version}" | get version.0)
+                {version: $version}
+              }
+            )
+          }
+
+          let json_output = ($matrix | to json -r)  # -r for raw output
+          echo $"Debug output: ($json_output)"
+
+          $"matrix=($json_output)" | save --append $env.GITHUB_OUTPUT
+          '
+      - name: Debug Combined Results
+        run: |
+          echo "Combined Results: '${{ steps.combine.outputs.matrix }}'"
+    outputs:
+      matrix: ${{ steps.combine.outputs.matrix }}
+  publish:
+    needs: combine_results
+    strategy:
+      matrix: ${{ fromJson(needs.combine_results.outputs.matrix) }}
+    uses: ./.github/workflows/mirror.yml
+    with:
+      version: ${{ inputs.postgresVersion != '' && inputs.postgresVersion || matrix.version }}
+    secrets: inherit
diff --git a/.github/workflows/qemu-image-build.yml b/.github/workflows/qemu-image-build.yml
new file mode 100644
index 000000000..464ee01fe
--- /dev/null
+++ b/.github/workflows/qemu-image-build.yml
@@ -0,0 +1,185 @@
+name: Build QEMU image
+
+on:
+  push:
+    branches:
+      - develop
+      - release/*
+    paths:
+      - '.github/workflows/qemu-image-build.yml'
+      - 'qemu-arm64-nix.pkr.hcl'
+      - 'common-nix.vars.pkr.hcl'
+      - 'ansible/vars.yml'
+  workflow_dispatch:
+
+jobs:
+  prepare:
+    runs-on: ubuntu-latest
+    outputs:
+      postgres_versions: ${{ steps.set-versions.outputs.postgres_versions }}
+    steps:
+      - name: Checkout Repo
+        uses: actions/checkout@v3
+
+      - uses: DeterminateSystems/nix-installer-action@main
+
+      - name: Set PostgreSQL versions (currently only builds pg15)
+        id: set-versions
+        run: |
+          VERSIONS=$(nix run nixpkgs#yq -- '.postgres_major[0]' ansible/vars.yml | nix run nixpkgs#jq -- -R -s -c 'split("\n")[:-1]')
+          echo "postgres_versions=$VERSIONS" >> $GITHUB_OUTPUT
+
+  build:
+    needs: prepare
+    strategy:
+      matrix:
+        postgres_version: ${{ fromJson(needs.prepare.outputs.postgres_versions) }}
+    runs-on: arm-native-runner
+    timeout-minutes: 150
+    permissions:
+      contents: write
+      packages: write
+      id-token: write
+
+    steps:
+      - name: Checkout Repo
+        uses: actions/checkout@v3
+
+      - uses: DeterminateSystems/nix-installer-action@main
+
+      - name: Run checks if triggered manually
+        if: ${{ github.event_name == 'workflow_dispatch' }}
+        run: |
+          SUFFIX=$(sudo nix run nixpkgs#yq -- ".postgres_release[\"postgres${{ matrix.postgres_version }}\"]" ansible/vars.yml | sed -E 's/[0-9\.]+(.*)$/\1/')
+          if [[ -z $SUFFIX
]] ; then + echo "Version must include non-numeric characters if built manually." + exit 1 + fi + + - name: enable KVM support + run: | + sudo chown runner /dev/kvm + sudo chmod 666 /dev/kvm + + - name: Set PostgreSQL version environment variable + run: echo "POSTGRES_MAJOR_VERSION=${{ matrix.postgres_version }}" >> $GITHUB_ENV + + - name: Generate common-nix.vars.pkr.hcl + run: | + curl -L https://github.com/mikefarah/yq/releases/download/v4.45.1/yq_linux_arm64 -o yq && chmod +x yq + PG_VERSION=$(./yq '.postgres_release["postgres'${{ matrix.postgres_version }}'"]' ansible/vars.yml) + PG_VERSION=$(echo $PG_VERSION | tr -d '"') # Remove any surrounding quotes + echo 'postgres-version = "'$PG_VERSION'"' > common-nix.vars.pkr.hcl + echo 'postgres-major-version = "'$POSTGRES_MAJOR_VERSION'"' >> common-nix.vars.pkr.hcl + # Ensure there's a newline at the end of the file + echo "" >> common-nix.vars.pkr.hcl + + # TODO (darora): not quite sure why I'm having to uninstall and re-install these deps, but the build fails w/o this + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get remove -y qemu-efi-aarch64 cloud-image-utils qemu-system-arm qemu-utils + sudo apt-get install -y qemu-efi-aarch64 cloud-image-utils qemu-system-arm qemu-utils + + - name: Build QEMU artifact + run: | + make init + GIT_SHA=${{github.sha}} + export PACKER_LOG=1 + packer build -var "git_sha=${GIT_SHA}" -var-file="common-nix.vars.pkr.hcl" qemu-arm64-nix.pkr.hcl + + - name: Grab release version + id: process_release_version + run: | + VERSION=$(cat common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + echo "version=$VERSION" >> $GITHUB_OUTPUT + + # - name: Create nix flake revision tarball + # run: | + # GIT_SHA=${{github.sha}} + # MAJOR_VERSION=${{ env.POSTGRES_MAJOR_VERSION }} + + # mkdir -p "/tmp/pg_upgrade_bin/${MAJOR_VERSION}" + # echo "$GIT_SHA" >> "/tmp/pg_upgrade_bin/${MAJOR_VERSION}/nix_flake_version" + # tar -czf "/tmp/pg_binaries.tar.gz" -C "/tmp/pg_upgrade_bin" . + + - name: configure aws credentials - staging + uses: aws-actions/configure-aws-credentials@v4 + with: + role-to-assume: ${{ secrets.DEV_AWS_ROLE }} + aws-region: "us-east-1" + + - name: Login to Amazon ECR Public + id: login-ecr-public + uses: aws-actions/amazon-ecr-login@v2 + with: + registry-type: public + + - name: Build, tag, and push docker image to Amazon ECR Public + env: + REGISTRY: public.ecr.aws/w9p6e7k7 + REGISTRY_ALIAS: supabase + REPOSITORY: postgres-vm-image + IMAGE_TAG: ${{ steps.process_release_version.outputs.version }} + run: | + docker build -f Dockerfile-kubernetes -t $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG . 
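+          # Dockerfile-kubernetes packages the qcow2 built by the packer step above; the push below publishes it to the public ECR registry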
+ docker push $REGISTRY/$REGISTRY_ALIAS/$REPOSITORY:$IMAGE_TAG + + # - name: Upload software manifest to s3 staging + # run: | + # cd ansible + # ansible-playbook -i localhost \ + # -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + # -e "internal_artifacts_bucket=${{ secrets.ARTIFACTS_BUCKET }}" \ + # -e "postgres_major_version=${{ env.POSTGRES_MAJOR_VERSION }}" \ + # manifest-playbook.yml + + # - name: Upload nix flake revision to s3 staging + # run: | + # aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + + # - name: configure aws credentials - prod + # uses: aws-actions/configure-aws-credentials@v4 + # with: + # role-to-assume: ${{ secrets.PROD_AWS_ROLE }} + # aws-region: "us-east-1" + + # - name: Upload software manifest to s3 prod + # run: | + # cd ansible + # ansible-playbook -i localhost \ + # -e "ami_release_version=${{ steps.process_release_version.outputs.version }}" \ + # -e "internal_artifacts_bucket=${{ secrets.PROD_ARTIFACTS_BUCKET }}" \ + # -e "postgres_major_version=${{ env.POSTGRES_MAJOR_VERSION }}" \ + # manifest-playbook.yml + + # - name: Upload nix flake revision to s3 prod + # run: | + # aws s3 cp /tmp/pg_binaries.tar.gz s3://${{ secrets.PROD_ARTIFACTS_BUCKET }}/upgrades/postgres/supabase-postgres-${{ steps.process_release_version.outputs.version }}/20.04.tar.gz + + # - name: Create release + # uses: softprops/action-gh-release@v1 + # with: + # name: ${{ steps.process_release_version.outputs.version }} + # tag_name: ${{ steps.process_release_version.outputs.version }} + # target_commitish: ${{github.sha}} + + # - name: Slack Notification on Failure + # if: ${{ failure() }} + # uses: rtCamp/action-slack-notify@v2 + # env: + # SLACK_WEBHOOK: ${{ secrets.SLACK_NOTIFICATIONS_WEBHOOK }} + # SLACK_USERNAME: 'gha-failures-notifier' + # SLACK_COLOR: 'danger' + # SLACK_MESSAGE: 'Building Postgres AMI failed' + # SLACK_FOOTER: '' + + - name: Cleanup resources after build + if: ${{ always() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids + + - name: Cleanup resources on build cancellation + if: ${{ cancelled() }} + run: | + aws ec2 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --instance-ids diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b6912b38f..6dc194684 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -76,7 +76,7 @@ jobs: echo "EOF" >> $GITHUB_OUTPUT - name: verify schema.sql is committed run: | - nix run github:supabase/postgres/${{ github.sha }}#dbmate-tool -- --version ${{ env.PGMAJOR }} + nix run github:supabase/postgres/${{ github.sha }}#dbmate-tool -- --version ${{ env.PGMAJOR }} --flake-url github:supabase/postgres/${{ github.sha }} if ! 
git diff --exit-code --quiet migrations/schema-${{ env.PGMAJOR }}.sql; then echo "Detected changes in schema.sql:" git diff migrations/schema-${{ env.PGMAJOR }}.sql diff --git a/.gitignore b/.gitignore index 45464e3f0..005d3ece6 100644 --- a/.gitignore +++ b/.gitignore @@ -23,4 +23,4 @@ result* .idea/ .vscode/ -db +db/schema.sql diff --git a/Dockerfile-kubernetes b/Dockerfile-kubernetes new file mode 100644 index 000000000..716e72b42 --- /dev/null +++ b/Dockerfile-kubernetes @@ -0,0 +1,9 @@ +FROM alpine:3.21 + +ADD ./output-cloudimg/packer-cloudimg /disk/focal.qcow2 + +RUN apk add --no-cache qemu-system-aarch64 qemu-img openssh-client nftables cloud-utils-localds aavmf +# dev stuff +# RUN apk add --no-cache iproute2 + +CMD exec /bin/sh -c "trap : TERM INT; sleep 9999999999d & wait" diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..5bef8a430 --- /dev/null +++ b/Makefile @@ -0,0 +1,20 @@ +UPSTREAM_NIX_GIT_SHA := $(shell git rev-parse HEAD) +GIT_SHA := $(shell git describe --tags --always --dirty) + +init: qemu-arm64-nix.pkr.hcl + packer init qemu-arm64-nix.pkr.hcl + +output-cloudimg/packer-cloudimg: ansible qemu-arm64-nix.pkr.hcl + packer build -var "git_sha=$(UPSTREAM_NIX_GIT_SHA)" qemu-arm64-nix.pkr.hcl + +disk/focal-raw.img: output-cloudimg/packer-cloudimg + mkdir -p disk + sudo qemu-img convert -O raw output-cloudimg/packer-cloudimg disk/focal-raw.img + +alpine-image: output-cloudimg/packer-cloudimg + sudo nerdctl build . -t supabase-postgres-test:$(GIT_SHA) -f ./Dockerfile-kubernetes + +clean: + rm -rf output-cloudimg + +.PHONY: alpine-image init clean diff --git a/amazon-arm64-nix.pkr.hcl b/amazon-arm64-nix.pkr.hcl index 118196473..ec427ff9f 100644 --- a/amazon-arm64-nix.pkr.hcl +++ b/amazon-arm64-nix.pkr.hcl @@ -264,7 +264,7 @@ build { ] use_env_var_file = true script = "ebssurrogate/scripts/surrogate-bootstrap-nix.sh" - execute_command = "sudo -S sh -c '. {{.EnvVarFile}} && {{.Path}}'" + execute_command = "sudo -S sh -c '. 
{{.EnvVarFile}} && cd /tmp/ansible-playbook && {{.Path}}'"
     start_retry_timeout = "5m"
     skip_clean = true
 }
diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh
index 515c490f6..c2367116d 100755
--- a/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh
+++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/complete.sh
@@ -150,6 +150,43 @@ EOF
     run_sql -c "$PATCH_PGMQ_QUERY"
     run_sql -c "update pg_extension set extowner = 'postgres'::regrole where extname = 'pgmq';"
+
+    # Patch to handle upgrading to pgsodium-less Vault
+    REENCRYPT_VAULT_SECRETS_QUERY=$(cat <- find /var/lib/postgresql/.nix-profile/bin/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "{{ item }}/$(basename $0)"' {} \;
+  loop:
+    - /usr/lib/postgresql/bin
+    - /usr/bin
   become: yes
   when: stage2_nix
@@ -194,23 +193,9 @@
   when: pg_config_stat.stat.exists and not pg_config_stat.stat.islnk and stage2_nix
   become: yes
-- name: Create symbolic links from /var/lib/postgresql/.nix-profile/bin to /usr/bin
-  file:
-    src: "{{ item }}"
-    dest: "/usr/bin/{{ item | basename }}"
-    state: link
-  with_fileglob:
-    - "/var/lib/postgresql/.nix-profile/bin/*"
-  become: yes
-  when: stage2_nix
-
 - name: Ensure postgres user has ownership of symlink
-  file:
-    path: "/usr/bin/{{ item | basename }}"
-    owner: postgres
-    group: postgres
-  with_fileglob:
-    - "/var/lib/postgresql/.nix-profile/bin/*"
+  shell: >-
+    find /var/lib/postgresql/.nix-profile/bin/ -maxdepth 1 -type f,l -exec sh -c 'chown postgres:postgres "/usr/bin/$(basename $0)"' {} \;
   become: yes
   when: stage2_nix
@@ -225,22 +210,14 @@
 # It was decided to leave pljava disabled at https://github.com/supabase/postgres/pull/690 therefore removing this task
 - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql to /usr/lib/postgresql/share/postgresql
-  file:
-    src: "{{ item }}"
-    dest: "/usr/lib/postgresql/share/postgresql/{{ item | basename }}"
-    state: link
-  with_fileglob:
-    - "/var/lib/postgresql/.nix-profile/share/postgresql/*"
+  shell: >-
+    find /var/lib/postgresql/.nix-profile/share/postgresql/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/$(basename $0)"' {} \;
   become: yes
   when: stage2_nix
 - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/extension to /usr/lib/postgresql/share/postgresql/extension
-  file:
-    src: "{{ item }}"
-    dest: "/usr/lib/postgresql/share/postgresql/extension/{{ item | basename }}"
-    state: link
-  with_fileglob:
-    - "/var/lib/postgresql/.nix-profile/share/postgresql/extension/*"
+  shell: >-
+    find /var/lib/postgresql/.nix-profile/share/postgresql/extension/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/extension/$(basename $0)"' {} \;
   become: yes
   when: stage2_nix
@@ -266,22 +243,14 @@
   when: stage2_nix and not is_psql_oriole
 - name: Create symbolic links from /var/lib/postgresql/.nix-profile/share/postgresql/timezonesets to /usr/lib/postgresql/share/postgresql/timeszonesets
-  file:
-    src: "{{ item }}"
-    dest: "/usr/lib/postgresql/share/postgresql/timezonesets/{{ item | basename }}"
-    state: link
-  with_fileglob:
-    - "/var/lib/postgresql/.nix-profile/share/postgresql/timezonesets/*"
+  shell: >-
+    find /var/lib/postgresql/.nix-profile/share/postgresql/timezonesets/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/timezonesets/$(basename $0)"' {} \;
   become: yes
   when: stage2_nix
 - name: Create symbolic links from
/var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data to /usr/lib/postgresql/share/postgresql/tsearch_data - file: - src: "{{ item }}" - dest: "/usr/lib/postgresql/share/postgresql/tsearch_data/{{ item | basename }}" - state: link - with_fileglob: - - "/var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data/*" + shell: >- + find /var/lib/postgresql/.nix-profile/share/postgresql/tsearch_data/ -maxdepth 1 -type f,l -exec sh -c 'ln -s "$0" "/usr/lib/postgresql/share/postgresql/tsearch_data/$(basename $0)"' {} \; become: yes when: stage2_nix diff --git a/ansible/tasks/test-image.yml b/ansible/tasks/test-image.yml index 9a8d4fa27..c680ce68e 100644 --- a/ansible/tasks/test-image.yml +++ b/ansible/tasks/test-image.yml @@ -11,17 +11,34 @@ # cmd: sed -i.bak -e "s/pg_net,\ pgsodium,\ timescaledb/pg_net,\ timescaledb/g" -e "s/pgsodium.getkey_script=/#pgsodium.getkey_script=/g" /etc/postgresql/postgresql.conf # when: debpkg_mode or stage2_nix -- name: Temporarily disable PG Sodium references in config +- name: Temporarily disable PG Sodium and Supabase Vault references in config become: yes become_user: postgres shell: cmd: > - sed -i.bak - -e 's/\(shared_preload_libraries = '\''.*\)pgsodium,\(.*'\''\)/\1\2/' + sed -i.bak + -e 's/\(shared_preload_libraries = '\''.*\)pgsodium,\(.*'\''\)/\1\2/' + -e 's/\(shared_preload_libraries = '\''.*\)supabase_vault,\(.*'\''\)/\1\2/' + -e 's/\(shared_preload_libraries = '\''.*\), *supabase_vault'\''/\1'\''/' -e 's/pgsodium.getkey_script=/#pgsodium.getkey_script=/' /etc/postgresql/postgresql.conf when: debpkg_mode or stage2_nix +- name: Verify pgsodium and vault removal from config + become: yes + become_user: postgres + shell: + cmd: | + FOUND=$(grep -E "shared_preload_libraries.*pgsodium|shared_preload_libraries.*supabase_vault|^pgsodium\.getkey_script" /etc/postgresql/postgresql.conf) + if [ ! -z "$FOUND" ]; then + echo "Found unremoved references:" + echo "$FOUND" + exit 1 + fi + register: verify_result + failed_when: verify_result.rc != 0 + when: debpkg_mode or stage2_nix + - name: Start Postgres Database to load all extensions. 
become: yes become_user: postgres diff --git a/ansible/vars.yml b/ansible/vars.yml index 9671ec2aa..206cb139f 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -8,8 +8,8 @@ postgres_major: # Full version strings for each major version postgres_release: - postgresorioledb-17: "17.0.1.31-orioledb" - postgres15: "15.8.1.035" + postgresorioledb-17: "17.0.1.035-orioledb" + postgres15: "15.8.1.039" # Non Postgres Extensions pgbouncer_release: "1.19.0" @@ -25,7 +25,7 @@ postgrest_x86_release_checksum: sha1:61c513f91a8931be4062587b9d4a18b42acf5c05 gotrue_release: 2.169.0 gotrue_release_checksum: sha1:1419b94683aac7ddc30355408b8e8b79e61146c4 -aws_cli_release: "2.2.7" +aws_cli_release: "2.23.11" salt_minion_version: 3007 diff --git a/ebssurrogate/files/unit-tests/unit-test-01.sql b/ebssurrogate/files/unit-tests/unit-test-01.sql index f3d47459f..c466af12e 100644 --- a/ebssurrogate/files/unit-tests/unit-test-01.sql +++ b/ebssurrogate/files/unit-tests/unit-test-01.sql @@ -17,7 +17,6 @@ BEGIN extension_array := ARRAY[ 'plpgsql', 'pg_stat_statements', - 'pgsodium', 'pgtap', 'pg_graphql', 'pgcrypto', @@ -30,7 +29,6 @@ BEGIN extension_array := ARRAY[ 'plpgsql', 'pg_stat_statements', - 'pgsodium', 'pgtap', 'pg_graphql', 'pgcrypto', @@ -44,7 +42,7 @@ BEGIN PERFORM set_config('myapp.extensions', array_to_string(extension_array, ','), false); END $$; -SELECT plan(8); +SELECT no_plan(); SELECT extensions_are( string_to_array(current_setting('myapp.extensions'), ',')::text[] @@ -56,9 +54,5 @@ SELECT has_schema('pg_catalog'); SELECT has_schema('information_schema'); SELECT has_schema('public'); -SELECT function_privs_are('pgsodium', 'crypto_aead_det_decrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); -SELECT function_privs_are('pgsodium', 'crypto_aead_det_encrypt', array['bytea', 'bytea', 'uuid', 'bytea'], 'service_role', array['EXECUTE']); -SELECT function_privs_are('pgsodium', 'crypto_aead_det_keygen', array[]::text[], 'service_role', array['EXECUTE']); - SELECT * FROM finish(); -ROLLBACK; \ No newline at end of file +ROLLBACK; diff --git a/ebssurrogate/scripts/qemu-bootstrap-nix.sh b/ebssurrogate/scripts/qemu-bootstrap-nix.sh new file mode 100755 index 000000000..61606c81e --- /dev/null +++ b/ebssurrogate/scripts/qemu-bootstrap-nix.sh @@ -0,0 +1,146 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail +set -o xtrace + +if [ $(dpkg --print-architecture) = "amd64" ]; then + ARCH="amd64" +else + ARCH="arm64" +fi + +function waitfor_boot_finished { + export DEBIAN_FRONTEND=noninteractive + + echo "args: ${ARGS}" + # Wait for cloudinit on the surrogate to complete before making progress + while [[ ! -f /var/lib/cloud/instance/boot-finished ]]; do + echo 'Waiting for cloud-init...' 
+    sleep 1
+  done
+}
+
+function install_packages {
+  apt-get update && sudo apt-get install software-properties-common e2fsprogs -y
+  add-apt-repository --yes --update ppa:ansible/ansible && sudo apt-get install ansible -y
+  ansible-galaxy collection install community.general
+}
+
+function execute_playbook {
+
+  tee /etc/ansible/ansible.cfg </dev/null
+LOCALE_ARCHIVE=/usr/lib/locale/locale-archive
+LANG="en_US.UTF-8"
+LANGUAGE="en_US.UTF-8"
+LC_ALL="en_US.UTF-8"
+LC_CTYPE="en_US.UTF-8"
+EOF
+}
+
+function setup_locale {
+  cat <<EOF >>/etc/locale.gen
+en_US.UTF-8 UTF-8
+EOF
+
+  cat <<EOF >/etc/default/locale
+LANG="C.UTF-8"
+LC_CTYPE="C.UTF-8"
+EOF
+  locale-gen en_US.UTF-8
+}
+
+sed -i 's/- hosts: all/- hosts: localhost/' ansible/playbook.yml
+
+waitfor_boot_finished
+install_packages
+setup_postgesql_env
+setup_locale
+execute_playbook
+
+####################
+# stage 2 things
+####################
+
+function install_nix() {
+  sudo su -c "curl --proto '=https' --tlsv1.2 -sSf -L https://install.determinate.systems/nix | sh -s -- install --no-confirm \
+    --extra-conf \"substituters = https://cache.nixos.org https://nix-postgres-artifacts.s3.amazonaws.com\" \
+    --extra-conf \"trusted-public-keys = nix-postgres-artifacts:dGZlQOvKcNEjvT7QEAJbcV6b6uk7VF/hWMjhYleiaLI=% cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=\" " -s /bin/bash root
+  . /nix/var/nix/profiles/default/etc/profile.d/nix-daemon.sh
+
+}
+
+function execute_stage2_playbook {
+  sudo tee /etc/ansible/ansible.cfg </dev/null; then
-    echo "Error: Could not create key directory $KEY_DIR" >&2
-    exit 1
-  fi
-  chmod 1777 "$KEY_DIR"
-
-  if [[ ! -f "$KEY_FILE" ]]; then
-    if ! (dd if=/dev/urandom bs=32 count=1 2>/dev/null | od -A n -t x1 | tr -d ' \n' > "$KEY_FILE"); then
-      if ! (openssl rand -hex 32 > "$KEY_FILE"); then
-        echo "00000000000000000000000000000000" > "$KEY_FILE"
-        echo "Warning: Using fallback key" >&2
+  getkey-script = pkgs.stdenv.mkDerivation {
+    name = "pgsodium-getkey";
+    buildCommand = ''
+      mkdir -p $out/bin
+      cat > $out/bin/pgsodium-getkey << 'EOF'
+      #!${pkgs.bash}/bin/bash
+      set -euo pipefail
+
+      TMPDIR_BASE=$(mktemp -d)
+
+      if [[ "$(uname)" == "Darwin" ]]; then
+        KEY_DIR="/private/tmp/pgsodium"
+      else
+        KEY_DIR="''${PGSODIUM_KEY_DIR:-$TMPDIR_BASE/pgsodium}"
+      fi
+      KEY_FILE="$KEY_DIR/pgsodium.key"
+
+      if ! mkdir -p "$KEY_DIR" 2>/dev/null; then
+        echo "Error: Could not create key directory $KEY_DIR" >&2
+        exit 1
+      fi
+      chmod 1777 "$KEY_DIR"
+
+      if [[ ! -f "$KEY_FILE" ]]; then
+        if ! (dd if=/dev/urandom bs=32 count=1 2>/dev/null | od -A n -t x1 | tr -d ' \n' > "$KEY_FILE"); then
+          if !
(openssl rand -hex 32 > "$KEY_FILE"); then + echo "00000000000000000000000000000000" > "$KEY_FILE" + echo "Warning: Using fallback key" >&2 + fi fi + chmod 644 "$KEY_FILE" fi - chmod 644 "$KEY_FILE" - fi - - if [[ -f "$KEY_FILE" && -r "$KEY_FILE" ]]; then - cat "$KEY_FILE" - else - echo "Error: Cannot read key file $KEY_FILE" >&2 - exit 1 - fi - ''; + + if [[ -f "$KEY_FILE" && -r "$KEY_FILE" ]]; then + cat "$KEY_FILE" + else + echo "Error: Cannot read key file $KEY_FILE" >&2 + exit 1 + fi + EOF + chmod +x $out/bin/pgsodium-getkey + ''; + }; # Use the shared setup but with a test-specific name start-postgres-server-bin = makePostgresDevSetup { @@ -675,6 +682,8 @@ echo "listen_addresses = '*'" >> "$PGTAP_CLUSTER"/postgresql.conf echo "port = 5435" >> "$PGTAP_CLUSTER"/postgresql.conf echo "host all all 127.0.0.1/32 trust" >> $PGTAP_CLUSTER/pg_hba.conf + echo "Checking shared_preload_libraries setting:" + grep -rn "shared_preload_libraries" "$PGTAP_CLUSTER"/postgresql.conf # Remove timescaledb if running orioledb-17 check echo "I AM ${pgpkg.version}====================================================" if [[ "${pgpkg.version}" == *"17"* ]]; then diff --git a/http/.gitkeep b/http/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/meta-data b/meta-data new file mode 100644 index 000000000..0551428a2 --- /dev/null +++ b/meta-data @@ -0,0 +1,2 @@ +instance-id: iid-local01 +local-hostname: packer-ubuntu diff --git a/migrations/README.md b/migrations/README.md index 19d2bf4b3..e756e2ce7 100644 --- a/migrations/README.md +++ b/migrations/README.md @@ -78,15 +78,18 @@ Additionally, [supabase/postgres](https://github.com/supabase/postgres/blob/deve ### Add a Migration +First, start a local postgres server and apply the migrations ```shell -# Start the database server -docker-compose up +nix run .#dbmate-tool -- --version 15 --flake-url "." 
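+# --flake-url "." points dbmate-tool at the local checkout; the default is github:supabase/postgres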
+```
-
-# create a new migration
+Then create a new migration
+```shell
+cd migrations
 dbmate new '<migration name>'
 ```
-Then, populate the migration at `./db/migrations/xxxxxxxxx_<migration name>` and make sure it execute sucessfully with
+Then, execute the migration at `./db/migrations/xxxxxxxxx_<migration name>` and make sure it runs successfully with
 ```shell
 dbmate up
diff --git a/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql b/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql
new file mode 100644
index 000000000..822a7587e
--- /dev/null
+++ b/migrations/db/migrations/20250205060043_disable_log_statement_on_internal_roles.sql
@@ -0,0 +1,6 @@
+-- migrate:up
+alter role supabase_admin set log_statement = none;
+alter role supabase_auth_admin set log_statement = none;
+alter role supabase_storage_admin set log_statement = none;
+
+-- migrate:down
diff --git a/migrations/db/migrations/20250205144616_move_orioledb_to_extensions_schema.sql b/migrations/db/migrations/20250205144616_move_orioledb_to_extensions_schema.sql
new file mode 100644
index 000000000..259a6b0e5
--- /dev/null
+++ b/migrations/db/migrations/20250205144616_move_orioledb_to_extensions_schema.sql
@@ -0,0 +1,26 @@
+-- migrate:up
+do $$
+declare
+  ext_schema text;
+  extensions_schema_exists boolean;
+begin
+  -- check if the "extensions" schema exists
+  select exists (
+    select 1 from pg_namespace where nspname = 'extensions'
+  ) into extensions_schema_exists;
+
+  if extensions_schema_exists then
+    -- check if the "orioledb" extension is in the "public" schema
+    select nspname into ext_schema
+    from pg_extension e
+    join pg_namespace n on e.extnamespace = n.oid
+    where extname = 'orioledb';
+
+    if ext_schema = 'public' then
+      execute 'alter extension orioledb set schema extensions';
+    end if;
+  end if;
+end $$;
+
+-- migrate:down
+
diff --git a/migrations/schema-15.sql b/migrations/schema-15.sql
index 1bff8b9d8..e6f4d7cd4 100644
--- a/migrations/schema-15.sql
+++ b/migrations/schema-15.sql
@@ -574,28 +574,6 @@ END
 $$;
 
---
--- Name: secrets_encrypt_secret_secret(); Type: FUNCTION; Schema: vault; Owner: -
---
-
-CREATE FUNCTION vault.secrets_encrypt_secret_secret() RETURNS trigger
-    LANGUAGE plpgsql
-    AS $$
-    BEGIN
-        new.secret = CASE WHEN new.secret IS NULL THEN NULL ELSE
-  CASE WHEN new.key_id IS NULL THEN NULL ELSE pg_catalog.encode(
-    pgsodium.crypto_aead_det_encrypt(
-      pg_catalog.convert_to(new.secret, 'utf8'),
-      pg_catalog.convert_to((new.id::text || new.description::text || new.created_at::text || new.updated_at::text)::text, 'utf8'),
-      new.key_id::uuid,
-      new.nonce
-    ),
-    'base64') END END;
-        RETURN new;
-    END;
-    $$;
-
-
 SET default_tablespace = '';
 SET default_table_access_method = heap;
@@ -782,30 +760,6 @@ CREATE TABLE storage.objects (
 );
 
---
--- Name: decrypted_secrets; Type: VIEW; Schema: vault; Owner: -
---
-
-CREATE VIEW vault.decrypted_secrets AS
- SELECT secrets.id,
-    secrets.name,
-    secrets.description,
-    secrets.secret,
-        CASE
-            WHEN (secrets.secret IS NULL) THEN NULL::text
-            ELSE
-            CASE
-                WHEN (secrets.key_id IS NULL) THEN NULL::text
-                ELSE convert_from(pgsodium.crypto_aead_det_decrypt(decode(secrets.secret, 'base64'::text), convert_to(((((secrets.id)::text || secrets.description) || (secrets.created_at)::text) || (secrets.updated_at)::text), 'utf8'::name), secrets.key_id, secrets.nonce), 'utf8'::name)
-            END
-        END AS decrypted_secret,
-    secrets.key_id,
-    secrets.nonce,
-    secrets.created_at,
-    secrets.updated_at
-   FROM vault.secrets;
-
-
 --
 -- Name: refresh_tokens id; Type: DEFAULT;
Schema: auth; Owner: - -- diff --git a/migrations/schema-orioledb-17.sql b/migrations/schema-orioledb-17.sql index 531970c37..b2a7cd3eb 100644 --- a/migrations/schema-orioledb-17.sql +++ b/migrations/schema-orioledb-17.sql @@ -91,7 +91,7 @@ CREATE SCHEMA vault; -- Name: orioledb; Type: EXTENSION; Schema: -; Owner: - -- -CREATE EXTENSION IF NOT EXISTS orioledb WITH SCHEMA public; +CREATE EXTENSION IF NOT EXISTS orioledb WITH SCHEMA extensions; -- @@ -589,28 +589,6 @@ END $$; --- --- Name: secrets_encrypt_secret_secret(); Type: FUNCTION; Schema: vault; Owner: - --- - -CREATE FUNCTION vault.secrets_encrypt_secret_secret() RETURNS trigger - LANGUAGE plpgsql - AS $$ - BEGIN - new.secret = CASE WHEN new.secret IS NULL THEN NULL ELSE - CASE WHEN new.key_id IS NULL THEN NULL ELSE pg_catalog.encode( - pgsodium.crypto_aead_det_encrypt( - pg_catalog.convert_to(new.secret, 'utf8'), - pg_catalog.convert_to((new.id::text || new.description::text || new.created_at::text || new.updated_at::text)::text, 'utf8'), - new.key_id::uuid, - new.nonce - ), - 'base64') END END; - RETURN new; - END; - $$; - - SET default_tablespace = ''; SET default_table_access_method = orioledb; @@ -797,30 +775,6 @@ CREATE TABLE storage.objects ( ); --- --- Name: decrypted_secrets; Type: VIEW; Schema: vault; Owner: - --- - -CREATE VIEW vault.decrypted_secrets AS - SELECT id, - name, - description, - secret, - CASE - WHEN (secret IS NULL) THEN NULL::text - ELSE - CASE - WHEN (key_id IS NULL) THEN NULL::text - ELSE convert_from(pgsodium.crypto_aead_det_decrypt(decode(secret, 'base64'::text), convert_to(((((id)::text || description) || (created_at)::text) || (updated_at)::text), 'utf8'::name), key_id, nonce), 'utf8'::name) - END - END AS decrypted_secret, - key_id, - nonce, - created_at, - updated_at - FROM vault.secrets; - - -- -- Name: refresh_tokens id; Type: DEFAULT; Schema: auth; Owner: - -- diff --git a/nix/ext/vault.nix b/nix/ext/vault.nix index c822fcd51..2cbd7e7a9 100644 --- a/nix/ext/vault.nix +++ b/nix/ext/vault.nix @@ -1,23 +1,24 @@ -{ lib, stdenv, fetchFromGitHub, postgresql }: +{ lib, stdenv, fetchFromGitHub, libsodium, postgresql }: stdenv.mkDerivation rec { pname = "vault"; - version = "0.2.9"; + version = "0.3.1"; - buildInputs = [ postgresql ]; + buildInputs = [ libsodium postgresql ]; src = fetchFromGitHub { owner = "supabase"; repo = pname; rev = "refs/tags/v${version}"; - hash = "sha256-kXTngBW4K6FkZM8HvJG2Jha6OQqbejhnk7tchxy031I="; + hash = "sha256-MC87bqgtynnDhmNZAu96jvfCpsGDCPB0g5TZfRQHd30="; }; installPhase = '' mkdir -p $out/{lib,share/postgresql/extension} - cp sql/*.sql $out/share/postgresql/extension - cp *.control $out/share/postgresql/extension + install -D *${postgresql.dlSuffix} $out/lib + install -D -t $out/share/postgresql/extension sql/*.sql + install -D -t $out/share/postgresql/extension *.control ''; meta = with lib; { diff --git a/nix/ext/wrappers/default.nix b/nix/ext/wrappers/default.nix index 887bbc10d..5dda1992d 100644 --- a/nix/ext/wrappers/default.nix +++ b/nix/ext/wrappers/default.nix @@ -133,7 +133,7 @@ buildPgrxExtension_0_12_6 rec { fi mv $out/lib/wrappers-${version}${postgresql.dlSuffix} $out/lib/wrappers${postgresql.dlSuffix} ln -s $out/lib/wrappers${postgresql.dlSuffix} $out/lib/wrappers-${version}${postgresql.dlSuffix} - + echo "Creating wrappers.so symlinks to support pg_upgrade..." 
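+      # each prior wrappers-<version>.so name is linked to the current library so pg_upgrade can still load it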
if [ -f "$out/lib/wrappers.so" ]; then while read -r previous_version; do diff --git a/nix/tests/expected/z_15_ext_interface.out b/nix/tests/expected/z_15_ext_interface.out index 9914fa3b9..2fedc4366 100644 --- a/nix/tests/expected/z_15_ext_interface.out +++ b/nix/tests/expected/z_15_ext_interface.out @@ -4750,6 +4750,9 @@ order by sslinfo | public | ssl_issuer_dn | | text sslinfo | public | ssl_issuer_field | text | text sslinfo | public | ssl_version | | text + supabase_vault | vault | _crypto_aead_det_decrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea + supabase_vault | vault | _crypto_aead_det_encrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea + supabase_vault | vault | _crypto_aead_det_noncegen | | bytea supabase_vault | vault | create_secret | new_secret text, new_name text, new_description text, new_key_id uuid | uuid supabase_vault | vault | update_secret | secret_id uuid, new_secret text, new_name text, new_description text, new_key_id uuid | void tablefunc | public | connectby | text, text, text, text, integer, text | SETOF record @@ -5226,7 +5229,7 @@ order by xml2 | public | xpath_table | text, text, text, text, text | SETOF record xml2 | public | xslt_process | text, text | text xml2 | public | xslt_process | text, text, text | text -(5055 rows) +(5058 rows) /* @@ -6034,6 +6037,15 @@ order by postgis_topology | topology | topology | name postgis_topology | topology | topology | precision postgis_topology | topology | topology | srid + supabase_vault | vault | decrypted_secrets | created_at + supabase_vault | vault | decrypted_secrets | decrypted_secret + supabase_vault | vault | decrypted_secrets | description + supabase_vault | vault | decrypted_secrets | id + supabase_vault | vault | decrypted_secrets | key_id + supabase_vault | vault | decrypted_secrets | name + supabase_vault | vault | decrypted_secrets | nonce + supabase_vault | vault | decrypted_secrets | secret + supabase_vault | vault | decrypted_secrets | updated_at supabase_vault | vault | secrets | created_at supabase_vault | vault | secrets | description supabase_vault | vault | secrets | id @@ -6357,5 +6369,5 @@ order by wrappers | public | wrappers_fdw_stats | rows_in wrappers | public | wrappers_fdw_stats | rows_out wrappers | public | wrappers_fdw_stats | updated_at -(1097 rows) +(1106 rows) diff --git a/nix/tests/expected/z_17_ext_interface.out b/nix/tests/expected/z_17_ext_interface.out index 37f417f81..c7cef4a5f 100644 --- a/nix/tests/expected/z_17_ext_interface.out +++ b/nix/tests/expected/z_17_ext_interface.out @@ -1078,41 +1078,41 @@ order by ltree | public | subpath | ltree, integer, integer | ltree ltree | public | text2ltree | text | ltree moddatetime | public | moddatetime | | trigger - orioledb | public | orioledb_commit_hash | | text - orioledb | public | orioledb_compression_max_level | | bigint - orioledb | public | orioledb_evict_pages | relid oid, maxlevel integer | void - orioledb | public | orioledb_get_evicted_trees | OUT datoid oid, OUT relnode oid, OUT root_downlink bigint, OUT file_length bigint | SETOF record - orioledb | public | orioledb_get_index_descrs | OUT datoid oid, OUT reloid oid, OUT relnode oid, OUT refcnt oid | SETOF record - orioledb | public | orioledb_get_table_descrs | OUT datoid oid, OUT reloid oid, OUT relnode oid, OUT refcnt oid | SETOF record - orioledb | public | orioledb_has_retained_undo | | boolean - orioledb | public | orioledb_idx_structure | relid oid, tree_name text, options 
character varying, depth integer | text - orioledb | public | orioledb_index_description | datoid oid, relid oid, relnode oid, index_type text, OUT name text, OUT description text | record - orioledb | public | orioledb_index_oids | OUT datoid oid, OUT table_reloid oid, OUT table_relnode oid, OUT index_reloid oid, OUT index_relnode oid, OUT index_type text | SETOF record - orioledb | public | orioledb_index_rows | relid oid, OUT total integer, OUT dead integer | record - orioledb | public | orioledb_page_stats | OUT pool_name text, OUT busy_pages bigint, OUT free_pages bigint, OUT dirty_pages bigint, OUT all_pages bigint | SETOF record - orioledb | public | orioledb_recovery_synchronized | | boolean - orioledb | public | orioledb_relation_size | relid oid | bigint - orioledb | public | orioledb_sys_tree_check | num integer, force_map_check boolean | boolean - orioledb | public | orioledb_sys_tree_rows | num integer | SETOF jsonb - orioledb | public | orioledb_sys_tree_structure | num integer, options character varying, depth integer | text - orioledb | public | orioledb_table_description | datoid oid, relid oid, relnode oid | text - orioledb | public | orioledb_table_description | relid oid | text - orioledb | public | orioledb_table_oids | OUT datoid oid, OUT reloid oid, OUT relnode oid | SETOF record - orioledb | public | orioledb_table_pages | relid oid, OUT blkno bigint, OUT level integer, OUT rightlink bigint, OUT hikey jsonb | SETOF record - orioledb | public | orioledb_tableam_handler | internal | table_am_handler - orioledb | public | orioledb_tbl_are_indices_equal | idx_oid1 regclass, idx_oid2 regclass | boolean - orioledb | public | orioledb_tbl_bin_structure | relid oid, print_bytes boolean, depth integer | text - orioledb | public | orioledb_tbl_check | relid oid, force_map_check boolean | boolean - orioledb | public | orioledb_tbl_compression_check | level bigint, relid oid, ranges integer[] | text - orioledb | public | orioledb_tbl_indices | relid oid | text - orioledb | public | orioledb_tbl_structure | relid oid, options character varying, depth integer | text - orioledb | public | orioledb_tree_stat | relid regclass, OUT level integer, OUT count bigint, OUT avgoccupied double precision, OUT avgvacated double precision | SETOF record - orioledb | public | orioledb_ucm_check | | boolean - orioledb | public | orioledb_version | | text - orioledb | public | orioledb_write_pages | relid oid | void - orioledb | public | pg_stopevent_reset | eventname text | boolean - orioledb | public | pg_stopevent_set | eventname text, condition jsonpath | void - orioledb | public | pg_stopevents | OUT stopevent text, OUT condition jsonpath, OUT waiter_pids integer[] | SETOF record + orioledb | extensions | orioledb_commit_hash | | text + orioledb | extensions | orioledb_compression_max_level | | bigint + orioledb | extensions | orioledb_evict_pages | relid oid, maxlevel integer | void + orioledb | extensions | orioledb_get_evicted_trees | OUT datoid oid, OUT relnode oid, OUT root_downlink bigint, OUT file_length bigint | SETOF record + orioledb | extensions | orioledb_get_index_descrs | OUT datoid oid, OUT reloid oid, OUT relnode oid, OUT refcnt oid | SETOF record + orioledb | extensions | orioledb_get_table_descrs | OUT datoid oid, OUT reloid oid, OUT relnode oid, OUT refcnt oid | SETOF record + orioledb | extensions | orioledb_has_retained_undo | | boolean + orioledb | extensions | orioledb_idx_structure | relid oid, tree_name text, options character varying, depth integer | text + orioledb 
| extensions | orioledb_index_description | datoid oid, relid oid, relnode oid, index_type text, OUT name text, OUT description text | record + orioledb | extensions | orioledb_index_oids | OUT datoid oid, OUT table_reloid oid, OUT table_relnode oid, OUT index_reloid oid, OUT index_relnode oid, OUT index_type text | SETOF record + orioledb | extensions | orioledb_index_rows | relid oid, OUT total integer, OUT dead integer | record + orioledb | extensions | orioledb_page_stats | OUT pool_name text, OUT busy_pages bigint, OUT free_pages bigint, OUT dirty_pages bigint, OUT all_pages bigint | SETOF record + orioledb | extensions | orioledb_recovery_synchronized | | boolean + orioledb | extensions | orioledb_relation_size | relid oid | bigint + orioledb | extensions | orioledb_sys_tree_check | num integer, force_map_check boolean | boolean + orioledb | extensions | orioledb_sys_tree_rows | num integer | SETOF jsonb + orioledb | extensions | orioledb_sys_tree_structure | num integer, options character varying, depth integer | text + orioledb | extensions | orioledb_table_description | datoid oid, relid oid, relnode oid | text + orioledb | extensions | orioledb_table_description | relid oid | text + orioledb | extensions | orioledb_table_oids | OUT datoid oid, OUT reloid oid, OUT relnode oid | SETOF record + orioledb | extensions | orioledb_table_pages | relid oid, OUT blkno bigint, OUT level integer, OUT rightlink bigint, OUT hikey jsonb | SETOF record + orioledb | extensions | orioledb_tableam_handler | internal | table_am_handler + orioledb | extensions | orioledb_tbl_are_indices_equal | idx_oid1 regclass, idx_oid2 regclass | boolean + orioledb | extensions | orioledb_tbl_bin_structure | relid oid, print_bytes boolean, depth integer | text + orioledb | extensions | orioledb_tbl_check | relid oid, force_map_check boolean | boolean + orioledb | extensions | orioledb_tbl_compression_check | level bigint, relid oid, ranges integer[] | text + orioledb | extensions | orioledb_tbl_indices | relid oid | text + orioledb | extensions | orioledb_tbl_structure | relid oid, options character varying, depth integer | text + orioledb | extensions | orioledb_tree_stat | relid regclass, OUT level integer, OUT count bigint, OUT avgoccupied double precision, OUT avgvacated double precision | SETOF record + orioledb | extensions | orioledb_ucm_check | | boolean + orioledb | extensions | orioledb_version | | text + orioledb | extensions | orioledb_write_pages | relid oid | void + orioledb | extensions | pg_stopevent_reset | eventname text | boolean + orioledb | extensions | pg_stopevent_set | eventname text, condition jsonpath | void + orioledb | extensions | pg_stopevents | OUT stopevent text, OUT condition jsonpath, OUT waiter_pids integer[] | SETOF record pageinspect | public | brin_metapage_info | page bytea, OUT magic text, OUT version integer, OUT pagesperrange integer, OUT lastrevmappage bigint | record pageinspect | public | brin_page_type | page bytea | text pageinspect | public | brin_revmap_data | page bytea, OUT pages tid | SETOF tid @@ -4707,6 +4707,9 @@ order by sslinfo | public | ssl_issuer_dn | | text sslinfo | public | ssl_issuer_field | text | text sslinfo | public | ssl_version | | text + supabase_vault | vault | _crypto_aead_det_decrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea + supabase_vault | vault | _crypto_aead_det_encrypt | message bytea, additional bytea, key_id bigint, context bytea, nonce bytea | bytea + supabase_vault | vault | 
_crypto_aead_det_noncegen | | bytea supabase_vault | vault | create_secret | new_secret text, new_name text, new_description text, new_key_id uuid | uuid supabase_vault | vault | update_secret | secret_id uuid, new_secret text, new_name text, new_description text, new_key_id uuid | void tablefunc | public | connectby | text, text, text, text, integer | SETOF record @@ -4906,7 +4909,7 @@ order by xml2 | public | xpath_table | text, text, text, text, text | SETOF record xml2 | public | xslt_process | text, text | text xml2 | public | xslt_process | text, text, text | text -(4747 rows) +(4750 rows) /* @@ -4966,26 +4969,26 @@ order by hypopg | public | hypopg_list_indexes | indexrelid hypopg | public | hypopg_list_indexes | schema_name hypopg | public | hypopg_list_indexes | table_name - orioledb | public | orioledb_index | datoid - orioledb | public | orioledb_index | description - orioledb | public | orioledb_index | index_relnode - orioledb | public | orioledb_index | index_reloid - orioledb | public | orioledb_index | index_type - orioledb | public | orioledb_index | name - orioledb | public | orioledb_index | table_relnode - orioledb | public | orioledb_index | table_reloid - orioledb | public | orioledb_index_descr | datoid - orioledb | public | orioledb_index_descr | refcnt - orioledb | public | orioledb_index_descr | relnode - orioledb | public | orioledb_index_descr | reloid - orioledb | public | orioledb_table | datoid - orioledb | public | orioledb_table | description - orioledb | public | orioledb_table | relnode - orioledb | public | orioledb_table | reloid - orioledb | public | orioledb_table_descr | datoid - orioledb | public | orioledb_table_descr | refcnt - orioledb | public | orioledb_table_descr | relnode - orioledb | public | orioledb_table_descr | reloid + orioledb | extensions | orioledb_index | datoid + orioledb | extensions | orioledb_index | description + orioledb | extensions | orioledb_index | index_relnode + orioledb | extensions | orioledb_index | index_reloid + orioledb | extensions | orioledb_index | index_type + orioledb | extensions | orioledb_index | name + orioledb | extensions | orioledb_index | table_relnode + orioledb | extensions | orioledb_index | table_reloid + orioledb | extensions | orioledb_index_descr | datoid + orioledb | extensions | orioledb_index_descr | refcnt + orioledb | extensions | orioledb_index_descr | relnode + orioledb | extensions | orioledb_index_descr | reloid + orioledb | extensions | orioledb_table | datoid + orioledb | extensions | orioledb_table | description + orioledb | extensions | orioledb_table | relnode + orioledb | extensions | orioledb_table | reloid + orioledb | extensions | orioledb_table_descr | datoid + orioledb | extensions | orioledb_table_descr | refcnt + orioledb | extensions | orioledb_table_descr | relnode + orioledb | extensions | orioledb_table_descr | reloid pg_buffercache | public | pg_buffercache | bufferid pg_buffercache | public | pg_buffercache | isdirty pg_buffercache | public | pg_buffercache | pinning_backends @@ -5321,6 +5324,15 @@ order by postgis_topology | topology | topology | name postgis_topology | topology | topology | precision postgis_topology | topology | topology | srid + supabase_vault | vault | decrypted_secrets | created_at + supabase_vault | vault | decrypted_secrets | decrypted_secret + supabase_vault | vault | decrypted_secrets | description + supabase_vault | vault | decrypted_secrets | id + supabase_vault | vault | decrypted_secrets | key_id + supabase_vault | vault | 
decrypted_secrets | name + supabase_vault | vault | decrypted_secrets | nonce + supabase_vault | vault | decrypted_secrets | secret + supabase_vault | vault | decrypted_secrets | updated_at supabase_vault | vault | secrets | created_at supabase_vault | vault | secrets | description supabase_vault | vault | secrets | id @@ -5338,5 +5350,5 @@ order by wrappers | public | wrappers_fdw_stats | rows_in wrappers | public | wrappers_fdw_stats | rows_out wrappers | public | wrappers_fdw_stats | updated_at -(398 rows) +(407 rows) diff --git a/nix/tests/postgresql.conf.in b/nix/tests/postgresql.conf.in index ef860afcb..483a1a8e2 100644 --- a/nix/tests/postgresql.conf.in +++ b/nix/tests/postgresql.conf.in @@ -718,7 +718,7 @@ default_text_search_config = 'pg_catalog.english' #local_preload_libraries = '' #session_preload_libraries = '' -shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain, pg_tle, plan_filter, pg_backtrace' # (change requires restart) +shared_preload_libraries = 'pg_stat_statements, pgaudit, plpgsql, plpgsql_check, pg_cron, pg_net, pgsodium, timescaledb, auto_explain, pg_tle, plan_filter, pg_backtrace, supabase_vault' # (change requires restart) jit_provider = 'llvmjit' # JIT library to use @@ -795,6 +795,7 @@ jit_provider = 'llvmjit' # JIT library to use # Add settings for extensions here pgsodium.getkey_script = '@PGSODIUM_GETKEY_SCRIPT@' +vault.getkey_script = '@PGSODIUM_GETKEY_SCRIPT@' auto_explain.log_min_duration = 10s cron.database_name = 'postgres' diff --git a/nix/tools/dbmate-tool.sh.in b/nix/tools/dbmate-tool.sh.in index 8c489839b..1197228af 100644 --- a/nix/tools/dbmate-tool.sh.in +++ b/nix/tools/dbmate-tool.sh.in @@ -15,39 +15,46 @@ CURRENT_SYSTEM="@CURRENT_SYSTEM@" ANSIBLE_VARS="@ANSIBLE_VARS@" PGBOUNCER_AUTH_SCHEMA_SQL=@PGBOUNCER_AUTH_SCHEMA_SQL@ STAT_EXTENSION_SQL=@STAT_EXTENSION_SQL@ + +# Start PostgreSQL using nix +start_postgres() { + DATDIR=$(mktemp -d) + echo "Starting PostgreSQL in directory: $DATDIR" # Create the DATDIR if it doesn't exist + nix run "$FLAKE_URL#start-server" -- "$PSQL_VERSION" --skip-migrations --daemonize --datdir "$DATDIR" + echo "PostgreSQL started." +} + # Cleanup function cleanup() { echo "Cleaning up..." - # Kill postgres processes first + # Check if PostgreSQL processes exist if pgrep -f "postgres" >/dev/null; then - pkill -TERM postgres || true - sleep 2 - fi - - # Then kill overmind - if [ -S "./.overmind.sock" ]; then - overmind kill || true - sleep 2 + echo "Stopping PostgreSQL gracefully..." + + # Use pg_ctl to stop PostgreSQL + pg_ctl -D "$DATDIR" stop + + # Wait a bit for graceful shutdown + sleep 5 + + # Check if processes are still running + if pgrep -f "postgres" >/dev/null; then + echo "Warning: Some PostgreSQL processes could not be stopped gracefully." + fi + else + echo "PostgreSQL is not running, skipping stop." 
  fi
-  # Kill tmux sessions explicitly
-  pkill -f "tmux.*overmind.*postgresql" || true
-  tmux ls 2>/dev/null | grep 'overmind' | cut -d: -f1 | xargs -I{} tmux kill-session -t {} || true
-
-  # Force kill any stragglers
-  pkill -9 -f "(postgres|tmux.*overmind.*postgresql)" || true
-
-  rm -f .overmind.sock Procfile
-
-  # Final verification
-  if ps aux | grep -E "(postgres|overmind|tmux.*postgresql)" | grep -v grep >/dev/null; then
-    ps aux | grep -E "(postgres|overmind|tmux.*postgresql)" | grep -v grep
-    return 1
+  # Always exit successfully, log any remaining processes
+  if pgrep -f "postgres" >/dev/null; then
+    echo "Warning: Some PostgreSQL processes could not be cleaned up:"
+    pgrep -f "postgres"
+  else
+    echo "Cleanup completed successfully"
   fi
 }
-# Set up trap for cleanup on script exit
 
 # Function to display help
 print_help() {
@@ -57,7 +64,7 @@ print_help() {
   echo "  -v, --version [15|16|orioledb-17|all]  Specify the PostgreSQL version to use (required defaults to --version all)"
   echo "  -p, --port PORT  Specify the port number to use (default: 5435)"
   echo "  -h, --help  Show this help message"
-  echo
+  echo "  -f, --flake-url URL  Specify the flake URL to use (default: github:supabase/postgres)"
   echo "Description:"
   echo "  Runs 'dbmate up' against a locally running the version of database you specify. Or 'all' to run against all versions."
   echo "  NOTE: To create a migration, you must run 'nix develop' and then 'dbmate new <migration name>' to create a new migration file."
   echo
   echo "Examples:"
   echo "  nix run .#dbmate-tool"
   echo "  nix run .#dbmate-tool -- --version 15"
   echo "  nix run .#dbmate-tool -- --version 16 --port 5433"
+  echo "  nix run .#dbmate-tool -- --version 16 --port 5433 --flake-url github:supabase/postgres/<ref>"
 }
-
 # Parse arguments
 while [[ "$#" -gt 0 ]]; do
   case "$1" in
@@ -125,7 +132,7 @@ wait_for_postgres() {
   local max_attempts=30  # Increased significantly
   local attempt=1
-  # Give overmind a moment to actually start the process
+  # Give PostgreSQL a moment to actually start the process
   sleep 2
   while [ $attempt -le $max_attempts ]; do
@@ -142,7 +149,6 @@
   done
   echo "PostgreSQL failed to start after $max_attempts attempts"
-  overmind echo postgres
   return 1
 }
@@ -175,26 +181,7 @@ trim_schema() {
       ;;
   esac
 }
-overmind_start() {
-  cat > Procfile << EOF
-postgres_${PSQL_VERSION}: exec nix run "$FLAKE_URL#start-server" -- "$PSQL_VERSION" --skip-migrations
-EOF
-  overmind start -D
-  echo "Waiting for overmind socket..."
-  max_wait=5
-  count=0
-  while [ $count -lt $max_wait ]; do
-    if [ -S "./.overmind.sock" ]; then
-      # Found the socket, give it a moment to be ready
-      sleep 5
-      echo "Socket file found and ready"
-      break
-    fi
-    echo "Waiting for socket file (attempt $count/$max_wait)"
-    sleep 1
-    count=$((count + 1))
-  done
-}
+
 perform_dump() {
   local max_attempts=3
   local attempt=1
@@ -214,21 +201,18 @@
   echo "All dump attempts failed"
   return 1
 }
+
 migrate_version() {
   echo "PSQL_VERSION: $PSQL_VERSION"
-  overmind kill || true
-  rm -f .overmind.sock Procfile || true
+  #pkill -f "postgres" || true # Ensure PostgreSQL is stopped before starting
   PSQLBIN=$(nix build --no-link "$FLAKE_URL#psql_$PSQL_VERSION/bin" --json | jq -r '.[].outputs.out + "/bin"')
   echo "Using PostgreSQL version $PSQL_VERSION from $PSQLBIN"
-  # Start overmind
-  overmind_start
-  echo "Waiting for overmind socket..."
-
-
+  # Start PostgreSQL
+  start_postgres
   echo "Waiting for PostgreSQL to be ready..."
-  #Wait for PostgreSQL to be ready to accept connections
+  # Wait for PostgreSQL to be ready to accept connections
   if ! wait_for_postgres; then
     echo "Failed to connect to PostgreSQL server"
     exit 1
@@ -255,11 +239,11 @@ EOSQL
   "${PSQLBIN}/psql" -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$PGBOUNCER_AUTH_SCHEMA_SQL"
   "${PSQLBIN}/psql" -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$STAT_EXTENSION_SQL"
-  #set db url to run dbmate
+  # Set db url to run dbmate
   export DATABASE_URL="postgres://$PGSQL_USER:$PGPASSWORD@localhost:$PORTNO/postgres?sslmode=disable"
-  #export path so dbmate can find correct psql and pg_dump
+  # Export path so dbmate can find correct psql and pg_dump
   export PATH="$PSQLBIN:$PATH"
-  # run init scripts
+  # Run init scripts
   if ! dbmate --migrations-dir "$MIGRATIONS_DIR/init-scripts" up; then
     echo "Error: Initial migration failed"
     exit 1
diff --git a/nix/tools/run-server.sh.in b/nix/tools/run-server.sh.in
index 75c5f8de7..0586e010b 100644
--- a/nix/tools/run-server.sh.in
+++ b/nix/tools/run-server.sh.in
@@ -56,12 +56,15 @@ start_postgres() {
 }

 stop_postgres() {
-  pg_ctl stop -D "$DATDIR" -m fast
+  if [ "$DAEMONIZE" = true ]; then
+    echo "PostgreSQL is running in daemon mode. Please stop it using pg_ctl."
+  else
+    pg_ctl stop -D "$DATDIR" -m fast
+  fi
 }

 trap 'stop_postgres' SIGINT SIGTERM

-# Parse arguments
 # Parse arguments
 while [[ "$#" -gt 0 ]]; do
   case "$1" in
@@ -104,6 +107,15 @@ while [[ "$#" -gt 0 ]]; do
       print_help
       exit 0
       ;;
+    --datdir)
+      if [[ -n "$2" && ! "$2" =~ ^- ]]; then
+        DATDIR="$2"
+        shift 2
+      else
+        echo "Error: --datdir requires a directory path"
+        exit 1
+      fi
+      ;;
     *)
       if [[ "$1" =~ ^- ]]; then
         echo "Unknown option: $1"
@@ -161,7 +173,9 @@ STAT_EXTENSION_SQL=@STAT_EXTENSION_SQL@
 MECAB_LIB=@MECAB_LIB@

 # Setup directories and locale settings
-DATDIR=$(mktemp -d)
+if [[ -z "$DATDIR" ]]; then
+  DATDIR=$(mktemp -d)
+fi
 LOCALE_ARCHIVE=@LOCALES@
 CURRENT_SYSTEM=@CURRENT_SYSTEM@
@@ -209,6 +223,8 @@ sed -e "1i\\
 include = '$DATDIR/supautils.conf'" \
 -e "\$a\\
 pgsodium.getkey_script = '$PGSODIUM_GETKEY_SCRIPT'" \
+-e "\$a\\
+vault.getkey_script = '$PGSODIUM_GETKEY_SCRIPT'" \
 -e "s|data_directory = '/var/lib/postgresql/data'|data_directory = '$DATDIR'|" \
 -e "s|hba_file = '/etc/postgresql/pg_hba.conf'|hba_file = '$DATDIR/pg_hba.conf'|" \
 -e "s|ident_file = '/etc/postgresql/pg_ident.conf'|ident_file = '$DATDIR/pg_ident.conf'|" \
@@ -329,6 +345,7 @@ EOSQL
     fi
   fi
   echo "Shutting down PostgreSQL..."
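+  # Reviewer note: the stop_postgres call below is effectively a no-op when
+  # --daemonize was passed (see the stop_postgres() change above); a daemonized
+  # server must be stopped externally via `pg_ctl -D "$DATDIR" stop`.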
+  stop_postgres

 # Step 4: Restart PostgreSQL in the foreground (with log output visible) or as a daemon
diff --git a/qemu-arm64-nix.pkr.hcl b/qemu-arm64-nix.pkr.hcl
new file mode 100644
index 000000000..a9843d1ad
--- /dev/null
+++ b/qemu-arm64-nix.pkr.hcl
@@ -0,0 +1,142 @@
+variable "ansible_arguments" {
+  type    = string
+  default = "--skip-tags install-postgrest,install-pgbouncer,install-supabase-internal"
+}
+
+variable "environment" {
+  type    = string
+  default = "prod"
+}
+
+variable "git_sha" {
+  type = string
+}
+
+locals {
+  creator = "packer"
+}
+
+variable "postgres-version" {
+  type    = string
+  default = ""
+}
+
+variable "postgres-major-version" {
+  type    = string
+  default = ""
+}
+
+variable "git-head-version" {
+  type    = string
+  default = "unknown"
+}
+
+variable "packer-execution-id" {
+  type    = string
+  default = "unknown"
+}
+
+packer {
+  required_plugins {
+    amazon = {
+      source  = "github.com/hashicorp/amazon"
+      version = "~> 1"
+    }
+    qemu = {
+      version = "~> 1.0"
+      source  = "github.com/hashicorp/qemu"
+    }
+  }
+}
+
+source "null" "dependencies" {
+  communicator = "none"
+}
+
+build {
+  name    = "cloudimg.deps"
+  sources = ["source.null.dependencies"]
+
+  provisioner "shell-local" {
+    inline = [
+      "cp /usr/share/AAVMF/AAVMF_VARS.fd AAVMF_VARS.fd",
+      "cloud-localds seeds-cloudimg.iso user-data-cloudimg meta-data"
+    ]
+    inline_shebang = "/bin/bash -e"
+  }
+}
+
+source "qemu" "cloudimg" {
+  boot_wait      = "2s"
+  cpus           = 8
+  disk_image     = true
+  disk_size      = "15G"
+  format         = "qcow2"
+  headless       = true
+  http_directory = "http"
+  iso_checksum   = "file:https://cloud-images.ubuntu.com/focal/current/SHA256SUMS"
+  iso_url        = "https://cloud-images.ubuntu.com/focal/current/focal-server-cloudimg-arm64.img"
+  memory         = 40000
+  qemu_binary    = "qemu-system-aarch64"
+  qemuargs = [
+    ["-machine", "virt,gic-version=3"],
+    ["-cpu", "host"],
+    ["-device", "virtio-gpu-pci"],
+    ["-drive", "if=pflash,format=raw,id=ovmf_code,readonly=on,file=/usr/share/AAVMF/AAVMF_CODE.fd"],
+    ["-drive", "if=pflash,format=raw,id=ovmf_vars,file=AAVMF_VARS.fd"],
+    ["-drive", "file=output-cloudimg/packer-cloudimg,format=qcow2"],
+    ["-drive", "file=seeds-cloudimg.iso,format=raw"],
+    ["--enable-kvm"]
+  ]
+  shutdown_command       = "sudo -S shutdown -P now"
+  ssh_handshake_attempts = 500
+  ssh_password           = "ubuntu"
+  ssh_timeout            = "1h"
+  ssh_username           = "ubuntu"
+  ssh_wait_timeout       = "1h"
+  use_backing_file       = false
+  accelerator            = "kvm"
+}
+
+build {
+  name    = "cloudimg.image"
+  sources = ["source.qemu.cloudimg"]
+
+  # Copy ansible playbook
+  provisioner "shell" {
+    inline = ["mkdir /tmp/ansible-playbook"]
+  }
+
+  provisioner "file" {
+    source      = "ansible"
+    destination = "/tmp/ansible-playbook"
+  }
+
+  provisioner "file" {
+    source      = "scripts"
+    destination = "/tmp/ansible-playbook"
+  }
+
+  provisioner "file" {
+    source      = "migrations"
+    destination = "/tmp"
+  }
+
+  provisioner "file" {
+    source      = "ebssurrogate/files/unit-tests"
+    destination = "/tmp"
+  }
+
+  provisioner "shell" {
+    environment_vars = [
+      "POSTGRES_MAJOR_VERSION=${var.postgres-major-version}",
+      "POSTGRES_SUPABASE_VERSION=${var.postgres-version}",
+      "GIT_SHA=${var.git_sha}"
+    ]
+    use_env_var_file    = true
+    script              = "ebssurrogate/scripts/qemu-bootstrap-nix.sh"
+    execute_command     = "sudo -S sh -c '. {{.EnvVarFile}} && cd /tmp/ansible-playbook && {{.Path}}'"
+    start_retry_timeout = "5m"
+    skip_clean          = true
+  }
+}
diff --git a/qemu_artifact.md b/qemu_artifact.md
new file mode 100644
index 000000000..c26b63b36
--- /dev/null
+++ b/qemu_artifact.md
@@ -0,0 +1,50 @@
+# QEMU artifact
+
+We build a container image that contains a QEMU qcow2 disk image. Container images are a convenient mechanism to ship the disk image to the nodes where it's needed.
+
+Given the size of the image, the first VM using it on a node might take a while to come up while the image is being pulled down. The image can be pre-fetched to avoid this; we might also switch to other deployment mechanisms in the future.
+
+## Build process
+
+The current AMI process involves a few steps:
+
+1. nix package is built and published using GHA (`.github/workflows/nix-build.yml`)
+   - this builds Postgres along with the PG extensions we use.
+2. "stage1" build (`amazon-arm64-nix.pkr.hcl`, invoked via `.github/workflows/ami-release-nix.yml`)
+   - uses an upstream Ubuntu image to initialize the AMI
+   - installs and configures the majority of the software that gets shipped as part of the AMI (e.g. gotrue, postgrest, ...)
+3. "stage2" build (`stage2-nix-psql.pkr.hcl`, invoked via `.github/workflows/ami-release-nix.yml`)
+   - uses the image published from (2)
+   - installs and configures the software that is built and published using nix in (1)
+   - cleans up build dependencies, etc.
+
+The QEMU artifact process collapses (2) and (3):
+
+a. nix package is built and published using GHA (`.github/workflows/nix-build.yml`)
+b. packer build (`qemu-arm64-nix.pkr.hcl`)
+   - uses an upstream Ubuntu live image as the base
+   - performs the work previously split across the "stage1" and "stage2" builds
+   - this work is executed using `ebssurrogate/scripts/qemu-bootstrap-nix.sh`
+
+## Publish image for later use
+
+Following `make init alpine-image`, the generated VM image should be bundled as a container image with the name `supabase-postgres-test`. Publish the built docker image to a registry of your choosing, and use the published image with e.g. KubeVirt.
+
+## Iterating on image
+
+For faster iteration, it's more convenient to build the image on an Ubuntu bare-metal node that's part of the EKS cluster you're using. Build the image in the `k8s.io` namespace in order for it to be available for immediate use on that node.
+
+### Dependencies note
+
+Installing `docker.io` on an EKS node might interfere with the k8s setup of the node. You can instead install `nerdctl` and `buildkit`:
+
+```bash
+curl -L -O https://github.com/containerd/nerdctl/releases/download/v2.0.0/nerdctl-2.0.0-linux-arm64.tar.gz
+tar -xzf nerdctl-2.0.0-linux-arm64.tar.gz
+mv ./nerdctl /usr/local/bin/
+curl -O -L https://github.com/moby/buildkit/releases/download/v0.17.1/buildkit-v0.17.1.linux-arm64.tar.gz
+tar -xzf buildkit-v0.17.1.linux-arm64.tar.gz
+mv bin/* /usr/local/bin/
+```
+
+You'll also need buildkit's daemon running: `buildkitd`
diff --git a/scripts/90-cleanup-qemu.sh b/scripts/90-cleanup-qemu.sh
new file mode 100644
index 000000000..d6c6ade05
--- /dev/null
+++ b/scripts/90-cleanup-qemu.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+
+# DigitalOcean Marketplace Image Validation Tool
+# © 2021 DigitalOcean LLC.
+# This code is licensed under Apache 2.0 license (see LICENSE.md for details)
+
+set -o errexit
+
+# Ensure /tmp exists and has the proper permissions before
+# checking for security updates
+# https://github.com/digitalocean/marketplace-partners/issues/94
+if [[ ! -d /tmp ]]; then
+  mkdir /tmp
+fi
+chmod 1777 /tmp
+
+if [ -n "$(command -v yum)" ]; then
+  yum update -y
+  yum clean all
+elif [ -n "$(command -v apt-get)" ]; then
+  # Cleanup more packages
+  apt-get -y remove --purge \
+    automake \
+    autoconf \
+    autotools-dev \
+    cmake-data \
+    cpp-8 \
+    cpp-9 \
+    cpp-10 \
+    gcc-8 \
+    gcc-9 \
+    gcc-10 \
+    git \
+    git-man \
+    ansible \
+    libicu-dev \
+    libcgal-dev \
+    libgcc-9-dev \
+    libgcc-8-dev \
+    snapd
+
+  add-apt-repository --yes --remove ppa:ansible/ansible
+
+  source /etc/os-release
+  apt-get -y remove --purge linux-headers-5.11.0-1021-aws
+
+  apt-get -y update
+  apt-get -y upgrade
+  apt-get -y autoremove
+  apt-get -y autoclean
+fi
+rm -rf /tmp/* /var/tmp/*
+history -c
+cat /dev/null > /root/.bash_history
+unset HISTFILE
+find /var/log -mtime -1 -type f -exec truncate -s 0 {} \;
+rm -rf /var/log/*.gz /var/log/*.[0-9] /var/log/*-????????
+rm -rf /var/lib/cloud/instances/*
+rm -f /root/.ssh/authorized_keys /etc/ssh/*key*
+touch /etc/ssh/revoked_keys
+chmod 600 /etc/ssh/revoked_keys
+
+cat /dev/null > /var/log/lastlog
+cat /dev/null > /var/log/wtmp
diff --git a/user-data-cloudimg b/user-data-cloudimg
new file mode 100644
index 000000000..9a74d237a
--- /dev/null
+++ b/user-data-cloudimg
@@ -0,0 +1,16 @@
+#cloud-config
+users:
+  - name: root
+    lock_passwd: false
+    ssh_redirect_user: true
+    hashed_passwd: "$6$canonical.$0zWaW71A9ke9ASsaOcFTdQ2tx1gSmLxMPrsH0rF0Yb.2AEKNPV1lrF94n6YuPJmnUy2K2/JSDtxuiBDey6Lpa/"
+  - name: ubuntu
+    lock_passwd: false
+    hashed_passwd: "$6$canonical.$0zWaW71A9ke9ASsaOcFTdQ2tx1gSmLxMPrsH0rF0Yb.2AEKNPV1lrF94n6YuPJmnUy2K2/JSDtxuiBDey6Lpa/"
+    ssh_redirect_user: false
+    sudo: "ALL=(ALL) NOPASSWD:ALL"
+    shell: /usr/bin/bash
+    groups: [adm, audio, cdrom, dialout, dip, floppy, lxd, netdev, plugdev, sudo, video]
+ssh_pwauth: True
+disable_root: false
+preserve_hostname: true
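
Review note: the cloud-config above can be sanity-checked locally before it is baked into the seed ISO. This is only a suggested check, not part of this diff; it assumes `cloud-init` 22.2+ (for the top-level `schema` subcommand) and `cloud-image-utils` (for `cloud-localds`) are available.

```bash
# Validate the cloud-config against cloud-init's schema; this flags unknown
# or mistyped keys and malformed values.
cloud-init schema --config-file user-data-cloudimg

# Build the seed ISO the same way the cloudimg.deps packer build does.
cloud-localds seeds-cloudimg.iso user-data-cloudimg meta-data
```

Similarly, `qemu_artifact.md` notes that the qcow2-carrying container image can be pre-fetched onto a node to hide the initial pull latency. A minimal sketch using `nerdctl`, which that doc already suggests installing; the registry and tag below are placeholders:

```bash
# Pull into the k8s.io containerd namespace so the image is immediately
# visible to kubelet/KubeVirt on this node.
nerdctl --namespace k8s.io pull registry.example.com/supabase-postgres-test:latest
```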