diff --git a/.changes/unreleased/Added-20241204-153550.yaml b/.changes/unreleased/Added-20241204-153550.yaml new file mode 100644 index 00000000..a324b32b --- /dev/null +++ b/.changes/unreleased/Added-20241204-153550.yaml @@ -0,0 +1,3 @@ +kind: Added +body: compatibility tests running automatically on each new tag +time: 2024-12-04T15:35:50.352507104+01:00 diff --git a/.github/workflows/compatibility-tests.yaml b/.github/workflows/compatibility-tests.yaml new file mode 100644 index 00000000..db7af57c --- /dev/null +++ b/.github/workflows/compatibility-tests.yaml @@ -0,0 +1,129 @@ +name: compatibility-tests + +on: + push: + tags: + - '*' + workflow_dispatch: + +jobs: + test-compatibility: + runs-on: ubuntu-latest + steps: + - name: Maximize build space + uses: AdityaGarg8/remove-unwanted-software@v4.1 + with: + remove-android: 'true' + remove-haskell: 'true' + remove-codeql: 'true' + remove-dotnet: 'true' + remove-swapfile: 'true' + + - name: checkout + uses: actions/checkout@v4 + with: + fetch-depth: 0 # we need to know about previous tags + + - name: print the latest version without "v" + id: latest-no-v + uses: miniscruff/changie-action@v2 + with: + version: latest + args: latest --remove-prefix + + - name: determine-versions + run: | + NEW_VERSION=${{ steps.latest-no-v.outputs.output }} + + # Extract the major and minor parts of the version + MAJOR=$(echo $NEW_VERSION | cut -d. -f1) + MINOR=$(echo $NEW_VERSION | cut -d. -f2) + PREV_MINOR=$((MINOR - 1)) + + # Find the latest tag of the previous minor, i.e. one matching "${MAJOR}.${PREV_MINOR}.*" + PREVIOUS_VERSION=$(git tag -l "${MAJOR}.${PREV_MINOR}.*" | sort --version-sort | tail -1) + + # If no previous version is found, fail the job + if [ -z "$PREVIOUS_VERSION" ]; then + echo "No previous version found, ensure your repository has proper tags." + exit 1 + fi + + # remove after creating 0.6.0 tag. + # We are incompatible with 0.4, so until a 0.6 tag exists (while the previous minor is 0.5), + # we run compatibility tests against the previous patch version instead + if [ "$PREVIOUS_VERSION" = "0.4.42" ]; then + PREVIOUS_VERSION="0.5.30" + fi + + echo "NEW_VERSION=$NEW_VERSION" >> $GITHUB_ENV + echo "PREVIOUS_VERSION=$PREVIOUS_VERSION" >> $GITHUB_ENV
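Reviewer note: the tag-selection logic above is easy to get wrong, so here is a minimal Go sketch of what the `determine-versions` step computes; the function name and sample tags below are illustrative only, not part of the operator:

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// previousVersion picks the highest "MAJOR.PREV_MINOR.*" tag, mirroring the
// shell in the determine-versions step. tags is the output of `git tag -l`.
func previousVersion(newVersion string, tags []string) (string, error) {
	parts := strings.SplitN(newVersion, ".", 3)
	if len(parts) < 3 {
		return "", fmt.Errorf("unexpected version format: %q", newVersion)
	}
	minor, err := strconv.Atoi(parts[1])
	if err != nil {
		return "", err
	}
	prefix := fmt.Sprintf("%s.%d.", parts[0], minor-1)

	var candidates []string
	for _, tag := range tags {
		if strings.HasPrefix(tag, prefix) {
			candidates = append(candidates, tag)
		}
	}
	if len(candidates) == 0 {
		return "", fmt.Errorf("no previous version found for %q", newVersion)
	}
	// Like `sort --version-sort | tail -1`: the highest patch release of the
	// previous minor ends up last.
	sort.Slice(candidates, func(i, j int) bool {
		pi, _ := strconv.Atoi(strings.TrimPrefix(candidates[i], prefix))
		pj, _ := strconv.Atoi(strings.TrimPrefix(candidates[j], prefix))
		return pi < pj
	})
	return candidates[len(candidates)-1], nil
}

func main() {
	tags := []string{"0.5.28", "0.5.30", "0.5.29", "0.6.1"}
	prev, err := previousVersion("0.6.2", tags)
	fmt.Println(prev, err) // 0.5.30 <nil>
}
```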
+ + - name: Setup Go + uses: actions/setup-go@v3 + with: + go-version: '1.22' + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y build-essential + + curl -LO https://dl.k8s.io/release/v1.25.3/bin/linux/amd64/kubectl + chmod +x ./kubectl && sudo mv ./kubectl /usr/local/bin + + HELM_VERSION="v3.10.3" + curl -sSL https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxvf - --strip-components=1 linux-amd64/helm + chmod +x ./helm && sudo mv ./helm /usr/local/bin + + go install sigs.k8s.io/kind@v0.25.0 + + curl -sSL https://storage.yandexcloud.net/yandexcloud-ydb/install.sh | bash + + echo "$(pwd)" >> $GITHUB_PATH + echo "$HOME/ydb/bin" >> $GITHUB_PATH + echo "$HOME/go/bin" >> $GITHUB_PATH + + - name: Check dependencies + run: | + gcc --version + go version + kind version + kubectl version --client=true + helm version + ydb version + + - name: Setup k8s cluster + run: | + kind create cluster \ + --image=kindest/node:v1.31.2@sha256:18fbefc20a7113353c7b75b5c869d7145a6abd6269154825872dc59c1329912e \ + --config=./tests/cfg/kind-cluster-config.yaml + + kubectl wait --timeout=5m --for=condition=ready node -l worker=true + + - name: Run compatibility tests + env: + NEW_VERSION: ${{ env.NEW_VERSION }} + PREVIOUS_VERSION: ${{ env.PREVIOUS_VERSION }} + run: | + go install gotest.tools/gotestsum@v1.12.0 + gotestsum --format pkgname --jsonfile log.json -- -v -timeout 3600s -p 1 ./tests/compatibility/... -ginkgo.vv -coverprofile cover.out + + - name: convert-to-human-readable + run: jq -r '.Output| gsub("[\\n]"; "")' log.json 2>/dev/null 1>log.txt || true + + - name: artifact-upload-step + uses: actions/upload-artifact@v4 + id: artifact-upload-step + if: always() + with: + name: compat-tests-log + path: log.txt + if-no-files-found: error + + - name: echo-tests-log-url + run: echo 'Compatibility tests log URL is ${{ steps.artifact-upload-step.outputs.artifact-url }}' + + - name: Teardown k8s cluster + run: | + kind delete cluster diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 413a31a1..36fbacfa 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -82,7 +82,7 @@ jobs: sudo apt-get update sudo apt-get install -y build-essential - go install sigs.k8s.io/kind@v0.17.0 + go install sigs.k8s.io/kind@v0.25.0 curl -LO https://dl.k8s.io/release/v1.25.3/bin/linux/amd64/kubectl chmod +x ./kubectl @@ -108,14 +108,10 @@ jobs: run: | kind delete cluster kind create cluster \ - --image=kindest/node:v1.25.3@sha256:cd248d1438192f7814fbca8fede13cfe5b9918746dfa12583976158a834fd5c5 \ - --config=./e2e/kind-cluster-config.yaml + --image=kindest/node:v1.31.2@sha256:18fbefc20a7113353c7b75b5c869d7145a6abd6269154825872dc59c1329912e \ + --config=./tests/cfg/kind-cluster-config.yaml kubectl wait --timeout=5m --for=condition=ready node -l worker=true - - kubectl label --overwrite node kind-worker topology.kubernetes.io/zone=fakeZone1 - kubectl label --overwrite node kind-worker2 topology.kubernetes.io/zone=fakeZone2 - kubectl label --overwrite node kind-worker3 topology.kubernetes.io/zone=fakeZone3 - name: build-operator-image uses: docker/build-push-action@v3 with: @@ -138,7 +134,7 @@ jobs: kind load docker-image k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0 --nodes kind-worker,kind-worker2,kind-worker3 - name: pull-and-load-ydb-image run: | - YDB_IMAGE=$(grep "anchor_for_fetching_image_from_workflow" ./e2e/tests/**/*.go | grep -o -E '"cr\.yandex.*"') + YDB_IMAGE=$(grep "anchor_for_fetching_image_from_workflow" ./tests/**/*.go | grep -o -E '"cr\.yandex.*"') YDB_IMAGE=${YDB_IMAGE:1:-1} # strip "" docker pull $YDB_IMAGE kind load docker-image $YDB_IMAGE --nodes kind-worker,kind-worker2,kind-worker3 @@ -148,7 +144,7 @@ jobs: - name: run-e2e-tests id: run-e2e-tests run: | - gotestsum --format pkgname --jsonfile log.json -- -v -timeout 3600s -p 1 ./e2e/... -ginkgo.vv + gotestsum --format pkgname --jsonfile log.json -- -v -timeout 3600s -p 1 ./tests/e2e/...
-ginkgo.vv - name: convert-json-log-to-human-readable run: jq -r '.Output| gsub("[\\n]"; "")' log.json 2>/dev/null 1>log.txt || true - name: artifact-upload-step @@ -164,4 +160,3 @@ jobs: - name: teardown-k8s-cluster run: | kind delete cluster - diff --git a/.golangci.yml b/.golangci.yml index b980d72f..1dc7fd9d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -37,15 +37,22 @@ output: # colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number" format: colored-line-number - # print lines of code with issue, default is true print-issued-lines: true - # print linter name in the end of issue text, default is true print-linter-name: true # all available settings of specific linters linters-settings: + stylecheck: + dot-import-whitelist: + # used in tests only + - "github.com/onsi/ginkgo/v2" + # used in tests only + - "github.com/onsi/gomega" + # it's nice having string constants in a separate package, but without boilerplate + - "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" + errcheck: # report about not checking of errors in types assetions: `a := b.(MyStruct)`; # default is false: such cases aren't reported by default. @@ -77,6 +84,7 @@ linters-settings: excludes: - G101 - G115 + - G601 # no longer relevant since Go 1.22 fieldalignment: # print struct with more effective memory layout or not, false by default suggest-new: true diff --git a/README.md b/README.md index 63c0b890..1323b427 100644 --- a/README.md +++ b/README.md @@ -1,5 +1,5 @@ -[![run-tests](https://github.com/ydb-platform/ydb-kubernetes-operator/actions/workflows/run-tests.yml/badge.svg)](https://github.com/ydb-platform/ydb-kubernetes-operator/actions/workflows/run-tests.yml) [![upload-artifacts](https://github.com/ydb-platform/ydb-kubernetes-operator/actions/workflows/upload-artifacts.yml/badge.svg)](https://github.com/ydb-platform/ydb-kubernetes-operator/actions/workflows/upload-artifacts.yml) +[![compatibility-tests](https://github.com/ydb-platform/ydb-kubernetes-operator/actions/workflows/compatibility-tests.yaml/badge.svg)](https://github.com/ydb-platform/ydb-kubernetes-operator/actions/workflows/compatibility-tests.yaml) # YDB Kubernetes Operator diff --git a/docs/tests.md b/docs/tests.md index 028cdfc8..8cbd46bd 100644 --- a/docs/tests.md +++ b/docs/tests.md @@ -43,7 +43,7 @@ containers on a single machine. This allows for full-blown smoke tests - apply the `Storage` manifests, wait until the `Pod`s become available, run `SELECT 1` inside one of those pods to check that YDB is actually up and running! -E2E tests are located in [e2e](../e2e) folder. +E2E tests are located in the [tests/e2e](../tests/e2e) folder. ## Running tests @@ -81,7 +81,7 @@ kind delete cluster --name=local-kind kind create cluster \ --image=kindest/node:v1.21.14@sha256:9d9eb5fb26b4fbc0c6d95fa8c790414f9750dd583f5d7cee45d92e8c26670aa1 \ --name=local-kind \ - --config=./e2e/kind-cluster-config.yaml \ + --config=./tests/cfg/kind-cluster-config.yaml \ --wait 5m # Switch your local kubeconfig to the newly created cluster: @@ -92,7 +92,7 @@ kubectl config use-context kind-local-kind # kind/ydb-operator:current # You have to download the ydb image and build the operator image yourself. Then, explicitly -# upload them into the kind cluster. Refer to `./github/e2e.yaml` github workflow which essentially +# upload them into the kind cluster. Refer to the `.github/workflows/run-tests.yml` github workflow, which essentially # does the same thing.
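# (For reference: that workflow builds the operator image, pulls the YDB image, and then loads both into the kind nodes, which is exactly what the commands below do by hand.)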
kind --name local-kind load docker-image kind/ydb-operator:current kind --name local-kind load docker-image ydb: diff --git a/e2e/operator-values.yaml b/e2e/operator-values.yaml deleted file mode 100644 index 41c1efe4..00000000 --- a/e2e/operator-values.yaml +++ /dev/null @@ -1,10 +0,0 @@ -image: - repository: kind/ydb-operator - tag: current - pullPolicy: Never - -webhook: - enabled: true - - patch: - enabled: true diff --git a/e2e/tests/data/ca.crt b/e2e/tests/data/ca.crt deleted file mode 100644 index 2ef75164..00000000 --- a/e2e/tests/data/ca.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIC/TCCAeWgAwIBAgIRAKt07W/6Gy+wIe5lk+0YyqwwDQYJKoZIhvcNAQELBQAw -FzEVMBMGA1UEAxMMdGVzdC1yb290LWNhMCAXDTI0MDQxMjA4MTM0NFoYDzIwNTQw -NDA1MDgxMzQ0WjAXMRUwEwYDVQQDEwx0ZXN0LXJvb3QtY2EwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQCs4c67HN45wf9jokQLdxDsfLUO5I3FiPVE4uWZ -Ma2zNSL2pMJBk95Vmj6pP/3HA6llUm3flVotzVzHh3C0j/WBZf6YE31eWlyMokuE -uLAGfKw/qL+gqC6Phoa72f9kJwnGXsVMDZijAEyqNquLZwgkK+4jgQcVhpGi/3ws -fop0qYVcK5LKAT5lGSx0MEuW74jheLDlscMsmUqVl2SCWRC/UGY+nUOTpcKK8228 -Corc+DEFstqOIXGH9n/k0ZmBxjh8eU4IRp+LiDcB6x/yI4edAYJK/mnejmSA2i6a -K2mSzCJfBSVnDxwiGWY6xm8eAh6MaDU6iuqqkFFPltGl90CRAgMBAAGjQjBAMA4G -A1UdDwEB/wQEAwICpDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTFXnzlk4tO -SosBlUEM7AiDOYuW7TANBgkqhkiG9w0BAQsFAAOCAQEAJHTeKc1ySltDwwINFVp1 -z5kFlIMyp3l146xn6qT5VWzYP4dZWdJz3gjAML56HRCHNe6B3MijjQY8sRObD5YI -589xpEhLMr+JR/DmU3Yol0XGILUdZ6TeK6FK+U3gYJdy3U39rcV2usEGfN5SRV4b -rUZg8asLFWPY7cdWBNIkF2yuJcF6PIpnuhzbfiEtOZ9ucvgnc62XPDnIuMSBKojG -Bj7QfqadEddSOztZFL00FZULIwSVE/8o0+HQvTBGjbZMuvuSBTJswujYUkD5Sy2O -wfc+dMJ6dbcDZR+5Q1kLEg+Oq7jjx/BTS35Axo1fPxO/WumJppTUmuEbFtEoX47o -fQ== ------END CERTIFICATE----- diff --git a/e2e/tests/data/tls.crt b/e2e/tests/data/tls.crt deleted file mode 100644 index 9549d538..00000000 --- a/e2e/tests/data/tls.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDYDCCAkigAwIBAgIRAKHQc3G24VDsWff5qQxyl3EwDQYJKoZIhvcNAQELBQAw -FzEVMBMGA1UEAxMMdGVzdC1yb290LWNhMB4XDTI0MDQxMjA4MTgxOVoXDTM5MDQw -OTA4MTgxOVowDzENMAsGA1UEChMEdGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBAMqGcyqm3UIEZCL3Ge8Qh63OU0QykI3ymtj9gDJ57vtZdUOpHF18 -QsX4Urf6vGNvI8gWXEtPbK8Dchqo+3Reuejjq8aRFs1RaPVZoWPD8i782p6L6/oX -4k5zAwvCdjC2y/YuUf4GTZqpfwDbhSfH+EdacrfwmYjLxYaehEn7z9M67R5wSekr -1bMuxBKZW1sclmip3JRf3uBuHfMLkoYpTa7KcEycn9YstZ1h2XuWFLk42GS4bMZs -Cq2E3sEqD8LOyCHs/qWWGM3Txz6edLTi5U6XtzPeWwxyM5W7fOpC0Q3iw58myGsm -DDmUngyneS/mzyLvKTywzsu2sRAqdjqsZCUCAwEAAaOBrjCBqzAdBgNVHSUEFjAU -BggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBTF -Xnzlk4tOSosBlUEM7AiDOYuW7TBbBgNVHREEVDBSgiJzdG9yYWdlLWdycGMueWRi -LnN2Yy5jbHVzdGVyLmxvY2FsgiwqLnN0b3JhZ2UtaW50ZXJjb25uZWN0LnlkYi5z -dmMuY2x1c3Rlci5sb2NhbDANBgkqhkiG9w0BAQsFAAOCAQEAQYRvaGGFsnR4cp8Y -MJo948t7zI3Pgy20YonmYTriz1zeEYNj9+5t678p04FlCjIx0j4dad1tFC1bNtnI -FOJNMkhiyC1JPKSN7HR90L5P9JfsuunUVTNEHP6EuLj2/VnMXj+30qX9i0kVkcnH -OV9yXk8cpN0GuUUyfKUq5k/WS+/15JRoLJ9F8vS1lm77nHeQ9F7m1Yjqkc131N3k -czFLe3wexWhnKCnNw5OtdFgJsMgL09pYgnN+0xyE4iLY8f0o/b58J/tpbKkeJb9L -YogP1+ultfGvrlX8TE6iAEi8UA+rZCNpGwiqTI2Y9clZNnkYu7UgW7kn2ih5/vgU -xntdsA== ------END CERTIFICATE----- diff --git a/e2e/tests/data/tls.key b/e2e/tests/data/tls.key deleted file mode 100644 index c8999b40..00000000 --- a/e2e/tests/data/tls.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAyoZzKqbdQgRkIvcZ7xCHrc5TRDKQjfKa2P2AMnnu+1l1Q6kc -XXxCxfhSt/q8Y28jyBZcS09srwNyGqj7dF656OOrxpEWzVFo9VmhY8PyLvzanovr -+hfiTnMDC8J2MLbL9i5R/gZNmql/ANuFJ8f4R1pyt/CZiMvFhp6ESfvP0zrtHnBJ 
-6SvVsy7EEplbWxyWaKnclF/e4G4d8wuShilNrspwTJyf1iy1nWHZe5YUuTjYZLhs -xmwKrYTewSoPws7IIez+pZYYzdPHPp50tOLlTpe3M95bDHIzlbt86kLRDeLDnybI -ayYMOZSeDKd5L+bPIu8pPLDOy7axECp2OqxkJQIDAQABAoIBAQCXKyBPl9nTax+r -kbID5dzAeR9h6jRIH+xBR4cnJiih6LZE2LfZd+UHjEGCHl/8AHs+4KHnfNNtFy9W -gweeZw5xrW8MekQA4WFssYhrxVjChe5RJbPwK1+6mtKNNout9OPtT8nXyLCoXxfz -defAN900tWinr6mKmD9KKowoBROtYBsKVwO4DOl+JonZP7A5eF/ao4T8XFSOOju5 -aU9flCjw6iagzKuryrJCzt4LsTA3/svuMswPQ7LRMSJm+RsceiBl4oVCuRmJ79tN -pmhHZqG80upp/idgyuGZu+rgePiXSnmtn47iXXvCjZKTNMkabQMc2WZ5P3HWfG0/ -28kXH5IBAoGBAPswl0SrvZdcQbalNHjLc1JtMMhZs6LPZe+/QTP4i/5qXvee8Dox -vvGJlvjNQuOGNsPUTsLMY203eKQWJ4+soK2qDhuDCxGmyQJ7NgTTzdhOCULctXlD -pdBHxiSmCUGEtW7Goe3M1/CL3sVfRp85FzMYItHHZEMTHZNzoZ+XXAzBAoGBAM5n -SknfqHM4oBT0LxxkmVFyiFoCi2grXzsGjzK5rk30DWajwT5sAy9x+X2vmG2zKkBJ -tTBYYUFh+SlNtBsY/TW2ZIcmHDpu0w82zXXDLLhUwYjxudXzyG30yU8d4woAwE6u -NPiVmFX6C8U597a/nC36LLsrSnUPvaJ4voFyRlxlAoGAHJa8MLmnO2nppMMKxNDL -EE+TJMpo0pfuTyoiXqrkLBGpO1+gkc8Fn3H8d9bMzR6CbyljyXH/wvd0SKCo4gZQ -x1M6hdEVWm30JM8nJ8d/fyXqkeySzvlvDtSMbbFkDkvvZms/FNSioyMYOLiOTiLu -TAdsNxoNhEDRte2MMKDGfkECgYAGH1o8xr2gbVWSSYv8M5+4osUYpmqsNF0myxME -Vi2tckfTe5gH2fxeM+tKpyLGXkIqlgUh4f1Aiz9w0jU9eIhKR5bDy4Wa1h68nMuL -araw4RK8lS8GAa04VcKC7kgFy+/oZZJ8rTNPmZMvzoBik1x2oK0jAC29OzJM13gP -LuyXYQKBgQCvkar/nthVUlScVa3uPzqKdpNlqmeXQRMVwcmRQJcJoHYWkaF2RRAx -AcjWTkP6/ThUgPhI76O0neQqO2N0P+FnCkaE4qU6Wg+dZcH0q7HVnt2TRuCzsECQ -kxDqdbF+qbXmYfbJ4dKkJdF0dNad4/d2hL/wbvyHXDEEGpuIlZAZrg== ------END RSA PRIVATE KEY----- diff --git a/internal/controllers/database/controller_test.go b/internal/controllers/database/controller_test.go index 8a9490cd..ee099129 100644 --- a/internal/controllers/database/controller_test.go +++ b/internal/controllers/database/controller_test.go @@ -21,11 +21,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - testobjects "github.com/ydb-platform/ydb-kubernetes-operator/e2e/tests/test-objects" . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/database" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storage" "github.com/ydb-platform/ydb-kubernetes-operator/internal/test" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects" ) var ( @@ -65,7 +65,7 @@ var _ = Describe("Database controller medium tests", func() { }, } Expect(k8sClient.Create(ctx, &namespace)).Should(Succeed()) - storageSample = *testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) + storageSample = *testobjects.DefaultStorage(filepath.Join("..", "..", "..", "tests", "data", "storage-mirror-3-dc-config.yaml")) Expect(k8sClient.Create(ctx, &storageSample)).Should(Succeed()) By("checking that Storage created on local cluster...") diff --git a/internal/controllers/databasenodeset/controller_test.go b/internal/controllers/databasenodeset/controller_test.go index 47beb813..26f7aeb7 100644 --- a/internal/controllers/databasenodeset/controller_test.go +++ b/internal/controllers/databasenodeset/controller_test.go @@ -16,12 +16,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - testobjects "github.com/ydb-platform/ydb-kubernetes-operator/e2e/tests/test-objects" . 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/database" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/databasenodeset" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storage" "github.com/ydb-platform/ydb-kubernetes-operator/internal/test" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects" ) const ( @@ -71,7 +71,7 @@ var _ = Describe("DatabaseNodeSet controller medium tests", func() { } Expect(k8sClient.Create(ctx, &namespace)).Should(Succeed()) - storageSample = *testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) + storageSample = *testobjects.DefaultStorage(filepath.Join("..", "..", "..", "tests", "data", "storage-mirror-3-dc-config.yaml")) Expect(k8sClient.Create(ctx, &storageSample)).Should(Succeed()) By("checking that Storage created on local cluster...") diff --git a/internal/controllers/monitoring/monitoring_test.go b/internal/controllers/monitoring/monitoring_test.go index 5a31d55c..be2651e0 100644 --- a/internal/controllers/monitoring/monitoring_test.go +++ b/internal/controllers/monitoring/monitoring_test.go @@ -16,11 +16,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" api "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - testobjects "github.com/ydb-platform/ydb-kubernetes-operator/e2e/tests/test-objects" . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" //nolint:revive,stylecheck "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/monitoring" "github.com/ydb-platform/ydb-kubernetes-operator/internal/labels" "github.com/ydb-platform/ydb-kubernetes-operator/internal/test" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects" ) var ( @@ -119,7 +119,7 @@ func createMockDBAndSvc() { func createMockStorageAndSvc() { GinkgoHelper() - stor := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) + stor := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "tests", "data", "storage-mirror-3-dc-config.yaml")) Expect(k8sClient.Create(ctx, stor)).Should(Succeed()) stor.Status.State = StorageReady diff --git a/internal/controllers/remotedatabasenodeset/controller_test.go b/internal/controllers/remotedatabasenodeset/controller_test.go index 25252795..d0335e93 100644 --- a/internal/controllers/remotedatabasenodeset/controller_test.go +++ b/internal/controllers/remotedatabasenodeset/controller_test.go @@ -25,7 +25,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - testobjects "github.com/ydb-platform/ydb-kubernetes-operator/e2e/tests/test-objects" ydbannotations "github.com/ydb-platform/ydb-kubernetes-operator/internal/annotations" . 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/database" @@ -36,6 +35,7 @@ import ( "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storagenodeset" "github.com/ydb-platform/ydb-kubernetes-operator/internal/resources" "github.com/ydb-platform/ydb-kubernetes-operator/internal/test" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects" ) const ( @@ -203,7 +203,7 @@ var _ = Describe("RemoteDatabaseNodeSet controller tests", func() { var databaseSample *v1alpha1.Database BeforeEach(func() { - storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "tests", "data", "storage-mirror-3-dc-config.yaml")) databaseSample = testobjects.DefaultDatabase() databaseSample.Spec.NodeSets = append(databaseSample.Spec.NodeSets, v1alpha1.DatabaseNodeSetSpecInline{ Name: testNodeSetName + "-local", diff --git a/internal/controllers/remotestoragenodeset/controller_test.go b/internal/controllers/remotestoragenodeset/controller_test.go index 0c5666c6..a1a20613 100644 --- a/internal/controllers/remotestoragenodeset/controller_test.go +++ b/internal/controllers/remotestoragenodeset/controller_test.go @@ -24,7 +24,6 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log/zap" "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - testobjects "github.com/ydb-platform/ydb-kubernetes-operator/e2e/tests/test-objects" ydbannotations "github.com/ydb-platform/ydb-kubernetes-operator/internal/annotations" . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/remotestoragenodeset" @@ -32,6 +31,7 @@ import ( "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storagenodeset" "github.com/ydb-platform/ydb-kubernetes-operator/internal/resources" "github.com/ydb-platform/ydb-kubernetes-operator/internal/test" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects" ) const ( @@ -175,7 +175,7 @@ var _ = Describe("RemoteStorageNodeSet controller tests", func() { var storageSample *v1alpha1.Storage BeforeEach(func() { - storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join("..", "..", "..", "tests", "data", "storage-mirror-3-dc-config.yaml")) storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, v1alpha1.StorageNodeSetSpecInline{ Name: testNodeSetName + "-local", StorageNodeSpec: v1alpha1.StorageNodeSpec{ diff --git a/internal/controllers/storage/controller_test.go b/internal/controllers/storage/controller_test.go index ded230bb..f36a699b 100644 --- a/internal/controllers/storage/controller_test.go +++ b/internal/controllers/storage/controller_test.go @@ -17,12 +17,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - testobjects "github.com/ydb-platform/ydb-kubernetes-operator/e2e/tests/test-objects" "github.com/ydb-platform/ydb-kubernetes-operator/internal/annotations" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storage" "github.com/ydb-platform/ydb-kubernetes-operator/internal/labels" 
"github.com/ydb-platform/ydb-kubernetes-operator/internal/resources" "github.com/ydb-platform/ydb-kubernetes-operator/internal/test" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects" ) var ( @@ -62,7 +62,7 @@ var _ = Describe("Storage controller medium tests", func() { }) It("Checking field propagation to objects", func() { - storageSample := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) + storageSample := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "tests", "data", "storage-mirror-3-dc-config.yaml")) tmpFilesDir := "/tmp/mounted_volume" testVolumeName := "sample-volume" diff --git a/internal/controllers/storage/init.go b/internal/controllers/storage/init.go index a9dc1b17..e0e0ea18 100644 --- a/internal/controllers/storage/init.go +++ b/internal/controllers/storage/init.go @@ -193,7 +193,7 @@ func (r *Reconciler) initializeBlobstorage( storage, corev1.EventTypeWarning, "InitializingStorage", - "Failed initBlobstorage Job, check Pod logs for addditional info", + "Failed initBlobstorage Job, check Pod logs for additional info", ) meta.SetStatusCondition(&storage.Status.Conditions, metav1.Condition{ Type: StorageInitializedCondition, diff --git a/internal/controllers/storagenodeset/controller_test.go b/internal/controllers/storagenodeset/controller_test.go index c93149e6..0302c85a 100644 --- a/internal/controllers/storagenodeset/controller_test.go +++ b/internal/controllers/storagenodeset/controller_test.go @@ -15,10 +15,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" - testobjects "github.com/ydb-platform/ydb-kubernetes-operator/e2e/tests/test-objects" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storage" "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/storagenodeset" "github.com/ydb-platform/ydb-kubernetes-operator/internal/test" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects" ) var ( @@ -62,7 +62,7 @@ var _ = Describe("StorageNodeSet controller medium tests", func() { }) It("Check controller operation through nodeSetSpec inline spec in Storage object", func() { - storageSample := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "e2e", "tests", "data", "storage-mirror-3-dc-config.yaml")) + storageSample := testobjects.DefaultStorage(filepath.Join("..", "..", "..", "tests", "data", "storage-mirror-3-dc-config.yaml")) // Test create inline nodeSetSpec in Storage object testNodeSetName := "nodeset" diff --git a/e2e/kind-cluster-config.yaml b/tests/cfg/kind-cluster-config.yaml similarity index 63% rename from e2e/kind-cluster-config.yaml rename to tests/cfg/kind-cluster-config.yaml index 0a1f7068..18829857 100644 --- a/e2e/kind-cluster-config.yaml +++ b/tests/cfg/kind-cluster-config.yaml @@ -2,12 +2,18 @@ kind: Cluster apiVersion: kind.x-k8s.io/v1alpha4 nodes: - role: control-plane + - role: worker labels: + topology.kubernetes.io/zone: az-1 worker: true + - role: worker labels: + topology.kubernetes.io/zone: az-2 worker: true + - role: worker labels: + topology.kubernetes.io/zone: az-3 worker: true diff --git a/tests/cfg/operator-local-values.yaml b/tests/cfg/operator-local-values.yaml new file mode 100644 index 00000000..edc33299 --- /dev/null +++ b/tests/cfg/operator-local-values.yaml @@ -0,0 +1,5 @@ +webhook: + enabled: true + + patch: + enabled: true diff --git a/tests/cfg/operator-values.yaml 
b/tests/cfg/operator-values.yaml new file mode 100644 index 00000000..edc33299 --- /dev/null +++ b/tests/cfg/operator-values.yaml @@ -0,0 +1,5 @@ +webhook: + enabled: true + + patch: + enabled: true diff --git a/tests/compatibility/compatibility_suite_test.go b/tests/compatibility/compatibility_suite_test.go new file mode 100644 index 00000000..1b28479c --- /dev/null +++ b/tests/compatibility/compatibility_suite_test.go @@ -0,0 +1,364 @@ +package compatibility + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/kubectl/pkg/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + logf "sigs.k8s.io/controller-runtime/pkg/log" + + v1alpha1 "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects" + . "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-utils" +) + +var ( + k8sClient client.Client + restConfig *rest.Config + + testEnv *envtest.Environment + + oldVersion string + newVersion string +) + +func TestCompatibility(t *testing.T) { + RegisterFailHandler(Fail) + RunSpecs(t, "Compatibility Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + useExistingCluster := true + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "deploy", "ydb-operator", "crds")}, + ErrorIfCRDPathMissing: true, + UseExistingCluster: &useExistingCluster, + } + + cfg, err := testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + if useExistingCluster && !(strings.Contains(cfg.Host, "127.0.0.1") || strings.Contains(cfg.Host, "::1") || strings.Contains(cfg.Host, "localhost")) { + Fail("You are trying to run e2e tests against some real cluster, not the local `kind` cluster!") + } + + err = v1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + clientset, err := kubernetes.NewForConfig(cfg) + Expect(err).NotTo(HaveOccurred()) + Expect(clientset).NotTo(BeNil()) + + oldVersion = os.Getenv("PREVIOUS_VERSION") + newVersion = os.Getenv("NEW_VERSION") + Expect(oldVersion).NotTo(BeEmpty(), "PREVIOUS_VERSION environment variable is required") + Expect(newVersion).NotTo(BeEmpty(), "NEW_VERSION environment variable is required") +}) + +var _ = Describe("Operator Compatibility Test", func() { + var ( + ctx context.Context + namespace corev1.Namespace + storageSample *v1alpha1.Storage + databaseSample *v1alpha1.Database + ) + + BeforeEach(func() { + ctx = context.Background() + + namespace = corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: testobjects.YdbNamespace, + }, + } + Expect(k8sClient.Create(ctx, &namespace)).Should(Succeed()) + + Eventually(func() bool { + ns := &corev1.Namespace{} + err := k8sClient.Get(ctx, client.ObjectKey{Name: namespace.Name}, ns) + return err == nil + }, Timeout, Interval).Should(BeTrue()) + + By(fmt.Sprintf("Installing previous operator version %s", oldVersion)) + 
InstallOperatorWithHelm(testobjects.YdbNamespace, oldVersion) + + storageSample = testobjects.DefaultStorage(filepath.Join("..", "data", "storage-mirror-3-dc-config.yaml")) + databaseSample = testobjects.DefaultDatabase() + }) + + It("Upgrades from old operator to new operator, objects persist, YQL works", func() { + By("Creating Storage resource") + Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name, + Namespace: testobjects.YdbNamespace, + }, storageSample)).Should(Succeed()) + + By("Waiting for Storage to be ready") + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) + + By("Creating Database resource") + Expect(k8sClient.Create(ctx, databaseSample)).Should(Succeed()) + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name, + Namespace: testobjects.YdbNamespace, + }, databaseSample)).Should(Succeed()) + defer DeleteDatabase(ctx, k8sClient, databaseSample) + + By("Waiting for Database to be ready") + WaitUntilDatabaseReady(ctx, k8sClient, databaseSample.Name, testobjects.YdbNamespace) + + By(fmt.Sprintf("Upgrading CRDs from %s to %s", oldVersion, newVersion)) + UpdateCRDsTo(YdbOperatorReleaseName, namespace.Name, newVersion) + + By(fmt.Sprintf("Upgrading operator from %s to %s, with uninstalling, just to cause more chaos", oldVersion, newVersion)) + UninstallOperatorWithHelm(testobjects.YdbNamespace) + InstallOperatorWithHelm(testobjects.YdbNamespace, newVersion) + + By("Verifying Storage + Database are the same objects after upgrade") + Consistently(func() error { + storageAfterUpgrade := v1alpha1.Storage{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name, + Namespace: testobjects.YdbNamespace, + }, &storageAfterUpgrade) + if err != nil { + return err + } + if storageAfterUpgrade.UID != storageSample.UID { + return fmt.Errorf("storage UID has changed") + } + + databaseAfterUpgrade := v1alpha1.Database{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name, + Namespace: testobjects.YdbNamespace, + }, &databaseAfterUpgrade) + if err != nil { + return err + } + if databaseAfterUpgrade.UID != databaseSample.UID { + return fmt.Errorf("database UID has changed") + } + return nil + }, ConsistentConditionTimeout, Interval).Should(Succeed()) + + By("Restarting storage pods (one by one, no rolling restart, for simplicity)") + RestartPodsNoRollingRestart(ctx, k8sClient, testobjects.YdbNamespace, "ydb-cluster", "kind-storage") + + By("Restarting database pods (one by one, no rolling restart, for simplicity)") + RestartPodsNoRollingRestart(ctx, k8sClient, testobjects.YdbNamespace, "ydb-cluster", "kind-database") + + database := v1alpha1.Database{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name, + Namespace: testobjects.YdbNamespace, + }, &database)).Should(Succeed()) + storageEndpoint := database.Spec.StorageEndpoint + + databasePods := corev1.PodList{} + Expect(k8sClient.List(ctx, &databasePods, + client.InNamespace(testobjects.YdbNamespace), + client.MatchingLabels{"ydb-cluster": "kind-database"}), + ).Should(Succeed()) + + Expect(databasePods.Items).ToNot(BeEmpty()) + podName := databasePods.Items[0].Name + + By("bring YDB CLI inside ydb database pod...") + BringYdbCliToPod(podName, testobjects.YdbNamespace) + + By("execute simple query inside ydb database pod...") + databasePath := "/" + testobjects.DefaultDomain 
+ "/" + databaseSample.Name + ExecuteSimpleTableE2ETest(podName, testobjects.YdbNamespace, storageEndpoint, databasePath) + }) + + It("Upgrades from old operator to new operator, applying objects later succeeds", func() { + By("Creating Storage resource") + Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name, + Namespace: testobjects.YdbNamespace, + }, storageSample)).Should(Succeed()) + + By("Waiting for Storage to be ready") + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) + + By("Creating Database resource") + Expect(k8sClient.Create(ctx, databaseSample)).Should(Succeed()) + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name, + Namespace: testobjects.YdbNamespace, + }, databaseSample)).Should(Succeed()) + defer DeleteDatabase(ctx, k8sClient, databaseSample) + + By("Waiting for Database to be ready") + WaitUntilDatabaseReady(ctx, k8sClient, databaseSample.Name, testobjects.YdbNamespace) + + By(fmt.Sprintf("Upgrading CRDs from %s to %s", oldVersion, newVersion)) + UpdateCRDsTo(YdbOperatorReleaseName, namespace.Name, newVersion) + + By(fmt.Sprintf("Upgrading operator from %s to %s, with uninstalling, just to cause more chaos", oldVersion, newVersion)) + UninstallOperatorWithHelm(testobjects.YdbNamespace) + InstallOperatorWithHelm(testobjects.YdbNamespace, newVersion) + + By("Verifying Storage + Database are the same objects after upgrade") + Consistently(func() error { + storageAfterUpgrade := v1alpha1.Storage{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: storageSample.Name, + Namespace: testobjects.YdbNamespace, + }, &storageAfterUpgrade) + if err != nil { + return err + } + if storageAfterUpgrade.UID != storageSample.UID { + return fmt.Errorf("storage UID has changed") + } + + databaseAfterUpgrade := v1alpha1.Database{} + err = k8sClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name, + Namespace: testobjects.YdbNamespace, + }, &databaseAfterUpgrade) + if err != nil { + return err + } + if databaseAfterUpgrade.UID != databaseSample.UID { + return fmt.Errorf("database UID has changed") + } + return nil + }, ConsistentConditionTimeout, Interval).Should(Succeed()) + + By("Restarting storage pods (one by one, no rolling restart, for simplicity)") + RestartPodsNoRollingRestart(ctx, k8sClient, testobjects.YdbNamespace, "ydb-cluster", "kind-storage") + + By("Restarting database pods (one by one, no rolling restart, for simplicity)") + RestartPodsNoRollingRestart(ctx, k8sClient, testobjects.YdbNamespace, "ydb-cluster", "kind-database") + + // This is probably the most important check. 
+ // If any major fields moved or got deleted, updating a resource will fail. + // For this to work even better, TODO @jorres make this storage object as full as possible + // (utilizing as many fields), as it will help catch errors + By("applying the storage again must NOT fail because of CRD issues...") + Expect(k8sClient.Get(ctx, client.ObjectKey{ + Name: storageSample.Name, + Namespace: storageSample.Namespace, + }, storageSample)).Should(Succeed()) + Expect(k8sClient.Update(ctx, storageSample)).Should(Succeed()) + + By("applying the database again must NOT fail because of CRD issues...") + Expect(k8sClient.Get(ctx, client.ObjectKey{ + Name: databaseSample.Name, + Namespace: databaseSample.Namespace, + }, databaseSample)).Should(Succeed()) + Expect(k8sClient.Update(ctx, databaseSample)).Should(Succeed()) + }) + + AfterEach(func() { + By("Uninstalling operator") + UninstallOperatorWithHelm(testobjects.YdbNamespace) + + By("Deleting namespace") + Expect(k8sClient.Delete(ctx, &namespace)).Should(Succeed()) + Eventually(func() bool { + ns := &corev1.Namespace{} + err := k8sClient.Get(ctx, client.ObjectKey{Name: namespace.Name}, ns) + return apierrors.IsNotFound(err) + }, Timeout, Interval).Should(BeTrue()) + }) +}) + +// NOTE: releaseName and namespace are currently unused: the chart is pulled by name and CRDs are cluster-scoped. +func UpdateCRDsTo(releaseName, namespace, version string) { + tempDir, err := os.MkdirTemp("", "helm-chart-*") + Expect(err).ShouldNot(HaveOccurred()) + defer os.RemoveAll(tempDir) + + cmd := exec.Command("helm", "pull", YdbOperatorRemoteChart, "--version", version, "--untar", "--untardir", tempDir) + output, err := cmd.CombinedOutput() + Expect(err).ShouldNot(HaveOccurred(), string(output)) + + crdDir := filepath.Join(tempDir, YdbOperatorRemoteChart, "crds") + crdFiles, err := filepath.Glob(filepath.Join(crdDir, "*.yaml")) + Expect(err).ShouldNot(HaveOccurred()) + for _, crdFile := range crdFiles { + cmd := exec.Command("kubectl", "apply", "-f", crdFile) + output, err := cmd.CombinedOutput() + Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("failed to apply CRD %s: %s", crdFile, string(output))) + } +} + +var _ = AfterSuite(func() { + By("cleaning up the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) + +// This was the fastest way to implement restart without bringing rolling +// restart into the operator itself or using ydbops. If you are reading this and the +// operator can already do rolling restarts natively, please rewrite +// this function!
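+// (In short: for each pod matching the label, delete it, wait until it is recreated +// with a new UID and reaches Running, then sleep 120 seconds so the cluster +// settles before moving on to the next pod.)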
+func RestartPodsNoRollingRestart( + ctx context.Context, + k8sClient client.Client, + namespace string, + labelKey, labelValue string, +) { + podList := corev1.PodList{} + + Expect(k8sClient.List(ctx, &podList, client.InNamespace(namespace), client.MatchingLabels{ + labelKey: labelValue, + })).Should(Succeed()) + + for _, pod := range podList.Items { + originalUID := pod.UID + Expect(k8sClient.Delete(ctx, &pod)).Should(Succeed()) + + Eventually(func() bool { + newPod := corev1.Pod{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: namespace}, &newPod) + if err != nil { + return apierrors.IsNotFound(err) + } + return newPod.UID != originalUID + }, Timeout, Interval).Should(BeTrue(), fmt.Sprintf("Pod %s should be recreated with a new UID", pod.Name)) + + Eventually(func() bool { + newPod := corev1.Pod{} + err := k8sClient.Get(ctx, types.NamespacedName{Name: pod.Name, Namespace: namespace}, &newPod) + if err != nil { + return false + } + return newPod.Status.Phase == corev1.PodRunning + }, Timeout, Interval).Should(BeTrue(), fmt.Sprintf("Pod %s should be running", pod.Name)) + + time.Sleep(120 * time.Second) + } +} diff --git a/tests/data/database.crt b/tests/data/database.crt new file mode 100644 index 00000000..5dbc3481 --- /dev/null +++ b/tests/data/database.crt @@ -0,0 +1,21 @@ +-----BEGIN CERTIFICATE----- +MIIDhDCCAmygAwIBAgIUUQQsk4wdGfrawpygX64aFtR6/1IwDQYJKoZIhvcNAQEL +BQAwFzEVMBMGA1UEAwwMdGVzdC1yb290LWNhMB4XDTI0MTIwNTEzMjEwMVoXDTM5 +MTIwMjEzMjEwMVowDzENMAsGA1UECgwEdGVzdDCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAMU+EoeP6971G2uFgo2Sm2Ela8rjZSmivZZ3Xg//rj9gcfle +KNoJ5EAHdwyTfapLoSVvgy1QLun15ibWeRgbBuUt2+DiHnEpaeriUqmktki9UIzl +pAjytDwCmsjbOoLXRhCIa02tkU6rF8JjpwitZnwhTXjJTAkuJiuNvN2EEdacTlx1 +ZPdcHQveJTVJy4eOoSA8yc72XG9CWPY8mhLMTOzoZqbRX7MRoZoyYaV8TNAyQmh4 +tX045h4u1ZmMkWC06z2n+8Le3wTpu6mccOhS2ETw0j3Jefx78Zafc2s+jX0lCP71 +qiQXeEx1vuYQ5+nop2wh2nTFeFrjH+zZ4eyKduUCAwEAAaOBzzCBzDBdBgNVHREE +VjBUgiNkYXRhYmFzZS1ncnBjLnlkYi5zdmMuY2x1c3Rlci5sb2NhbIItKi5kYXRh +YmFzZS1pbnRlcmNvbm5lY3QueWRiLnN2Yy5jbHVzdGVyLmxvY2FsMB0GA1UdJQQW +MBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMBAf8EAjAAMB0GA1UdDgQWBBRe +hFiyRdaUYUiw6KnHiuTcqwh0CjAfBgNVHSMEGDAWgBRPTY3GM3OCesbsHOf9zDIu +T6ubJTANBgkqhkiG9w0BAQsFAAOCAQEAkYb1N40MGhxw07vVDPHrBfuMSgPqSqef +myPtwAIuwPIOILIAIek0yUogeMKF7kv/C5fyRnac3iHz59M7V4PetW7YhLB6G20n +bOpvq1Bp8Lw7WwRviULWHHIsS9OZlekvikEs3jS9H7XZGgmKC4mN3GbCZkpUvRjU +BBsdyKkQsDupofrzbFPaWfgRjUPGuQ27vUrZkPlmQPrZmowJpTIYwMyJxL8qFtip +JWX8qsKRle58L/K64Nx7AbW2LFjey8txJtkkROwpy9Zt7Dn0kvLcjZC2H8Nqdx8o +bPJqXdMlbGEUFDo1W6W/6zYCRUuDVvtM26Yua5DOm+6wJW+sSqlv4Q== +-----END CERTIFICATE----- diff --git a/tests/data/database.key b/tests/data/database.key new file mode 100644 index 00000000..274d72b9 --- /dev/null +++ b/tests/data/database.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDFPhKHj+ve9Rtr +hYKNkpthJWvK42Upor2Wd14P/64/YHH5XijaCeRAB3cMk32qS6Elb4MtUC7p9eYm +1nkYGwblLdvg4h5xKWnq4lKppLZIvVCM5aQI8rQ8AprI2zqC10YQiGtNrZFOqxfC +Y6cIrWZ8IU14yUwJLiYrjbzdhBHWnE5cdWT3XB0L3iU1ScuHjqEgPMnO9lxvQlj2 +PJoSzEzs6Gam0V+zEaGaMmGlfEzQMkJoeLV9OOYeLtWZjJFgtOs9p/vC3t8E6bup +nHDoUthE8NI9yXn8e/GWn3NrPo19JQj+9aokF3hMdb7mEOfp6KdsIdp0xXha4x/s +2eHsinblAgMBAAECggEALgJQ8bDDcTZlD0NtJOd8GaDQQFsjO59T0I+nEB3Q0EVH +yMarSlMQ3FOxdCxKXak3HYOhynXf/6Clr00LobEaPmbgWZh9R+HEbG8fH6XFhHmu +mrMtfI3at33XC7/BqggbtpsPxqaUVNCpoeU7bxV9qLpe9ywjcafDbRjqo5RdUd0q +J29OtBD/tfEFU17Bv0VlYW/IXmGhJp686ZDvUpybrc2qGiWJo5wnGgUpL0OLNk0L +piK7TThjDzBNLCSzq6DOZOwIoNBmGINLK1Q3SjC77zWH3YzZv/u3EjE3v61phQlj 
+hy2tR3yimFYGxX6ZJduockJgOC4WJznkR2G85HjggQKBgQD5+JB1o5rfo7J7Txgl +afL7EL2v/+VZ7DSs7Zh+zlIbGGr2vNnW5SgOOfpnjAY+8E91D3BzfmWdIb7/BGwB +6VAq6aH4a1xhOfWRmUgHL1vDfMfxO8hFN2Ixo2QCt+mQv09lZrExpr1RmlHPPija +XWJ0yE85cJHZlfywLQrG85F+gQKBgQDJ//ArSt4idZ4uRyslMj2ntnsBaD8DwVzl +jie1+ohMW26Fsw3059JplmmiPAvQXFzl3oZcOpJjxjHdGP7al3mdzGHbBSLDtW/h +bREUX78RZm9WHpc2K8ZkPxp36EAysZpmKCdkWH7lB/pt7926BUyqhHXk7thrkTeU +PylzE/iOZQKBgE4oHKrbg5IHMcgCO+9+x/0eB+EepoxOIU4sX7DOO7fDE7af55Cc +R8Di+dskWdOV+ZIFSMijrYvKwFgl/ss+MtWoBP+SOekgYRqsDWxJr2xY+H8BjSWv +ImGYz61V6Y5bcqymxiJbGviHwqqEqetUpXMUKkkwXDnm/oHrI2J/R2+BAoGAYlzv +7ZTqcGtH2I8tUlKRtV5lrXy+2qxI+Tts2O+jeVM4kYBsdmqAiowE6kxFEHQ5hHIE +iVq4OD+lvl1SlM0YGqAQsp9gm155mZMLsxkgqG9yHcSNq4JLfDtCP0toH4degQpi +jDmPqSVmbCxWkyPLfmk8I3uvBUpUfyr2myQJcAUCgYEA+b+ovH8gh3PGfuXtj4zW +6IjGXnmt4u2YUssF9sBklbTq9Ev8M4h58TlNh1oHWQ6yyXnpsP8vZMFU0iMYybvi +WGfPMtigyiLASjTV7Ws60uQlZ8raHqtb7QN5wJrvGqVxJe6aw/gQmxY8ejNjjWyM +1QkHQGyLaWJWFUy3blpUBsA= +-----END PRIVATE KEY----- diff --git a/tests/data/generate-crts/README.md b/tests/data/generate-crts/README.md new file mode 100644 index 00000000..ad58d965 --- /dev/null +++ b/tests/data/generate-crts/README.md @@ -0,0 +1,13 @@ +## Certificates for testing + +`ca.crt` and `ca.key` are just a self-signed CA created like this: + + +``` +openssl genrsa -out ca.key 2048 +openssl req -x509 -new -nodes -key ca.key -sha256 -days 3650 -out ca.crt -subj "/CN=test-root-ca" +``` + +Then use `generate-test-certs.sh` to generate storage and database certs, which then are used in tests. + +`ca.srl` is a file created by the script as well. diff --git a/tests/data/generate-crts/ca.crt b/tests/data/generate-crts/ca.crt new file mode 100644 index 00000000..b8bf3192 --- /dev/null +++ b/tests/data/generate-crts/ca.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDDzCCAfegAwIBAgIUPG5Ffwh8I/zAtqSePmlBTGsP2eEwDQYJKoZIhvcNAQEL +BQAwFzEVMBMGA1UEAwwMdGVzdC1yb290LWNhMB4XDTI0MTIwNTExMjk1MFoXDTM0 +MTIwMzExMjk1MFowFzEVMBMGA1UEAwwMdGVzdC1yb290LWNhMIIBIjANBgkqhkiG +9w0BAQEFAAOCAQ8AMIIBCgKCAQEA45Y8h+mKX0//0H6B+KUcmhyis2dlfI8MlQNo +1qRpsQQKkqY+n6J8mzFPO+XOC/kLia6SShgpGZF79xhC9+Iq+2ARulIbPH3PiUdf +gwnLD/wfFgCmPaFFfJ93v9AY+eWeq00IKkRVp2gfb159C9BZQmoiyPCPOlWuLN/B +ZPMFHZUWPbL+4mvy4BBrcS/+FncUf7dA5ND7lb26G/sXUGWpYPLclhNnu7Hvapi4 +pIx60d8Z3+5eOVHEVECqgIU8wUqTrUbg1YMUHSZxdnsIPnL985sa7a66x/GAgMAi +xuAhUBMyxTUXOXqW+GWIlrmOHmiYRp7ARA3dPbYJJ1kdDfJRswIDAQABo1MwUTAd +BgNVHQ4EFgQUT02NxjNzgnrG7Bzn/cwyLk+rmyUwHwYDVR0jBBgwFoAUT02NxjNz +gnrG7Bzn/cwyLk+rmyUwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOC +AQEAfpqusEPmGoL/Hzkui/xs20k+JKFQ90e5iZPQLyuES7BKQp1iajMOytAIXlhY +dtt4oSOYmBfl2bs8OU0U2mjGetx0AHWINY7bNzg7wxd4H46iiCitC4qlUlGG23bF +GVQt7/SddmwKoOJBaasnRTBPqVTlreqAF4Ni8bY0kqO3GK5QWZ2sL+Btn9dML8aK +wLb6sW0h7rAjik0l3NcsrKE8UoViWjAgB3Oe9L00GSXaMnfD6V65XnzkXvLOpCdd +wjZHPoWinhqM8ZHm/iFSe2UcL1KG0rdrMg8oBY6zMNrgENEdhqEXNJJrioT9bzKE +FyNQDOxpeql/GJl2MGxj0FXy+Q== +-----END CERTIFICATE----- diff --git a/tests/data/generate-crts/ca.key b/tests/data/generate-crts/ca.key new file mode 100644 index 00000000..df26c211 --- /dev/null +++ b/tests/data/generate-crts/ca.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDjljyH6YpfT//Q +foH4pRyaHKKzZ2V8jwyVA2jWpGmxBAqSpj6fonybMU875c4L+QuJrpJKGCkZkXv3 +GEL34ir7YBG6Uhs8fc+JR1+DCcsP/B8WAKY9oUV8n3e/0Bj55Z6rTQgqRFWnaB9v +Xn0L0FlCaiLI8I86Va4s38Fk8wUdlRY9sv7ia/LgEGtxL/4WdxR/t0Dk0PuVvbob ++xdQZalg8tyWE2e7se9qmLikjHrR3xnf7l45UcRUQKqAhTzBSpOtRuDVgxQdJnF2 +ewg+cv3zmxrtrrrH8YCAwCLG4CFQEzLFNRc5epb4ZYiWuY4eaJhGnsBEDd09tgkn 
+WR0N8lGzAgMBAAECggEAB/+oZ6EeSzSalGSog2lPf13GR3E42znZLVuN/GKRkbZx ++1l7Fffgp+usZznaa11ODywNhCvOi1GA/obhw6iKmNnK2wuLstfmdWKxc/+MyCZf +nqcDMLim9+GllHOR4nve1GfEA7LxzQ0XHb4/vYHjFox4ZdYz5870bNX9triRKMrw +Ru6FMbdcMf8ClTHtqHIQgARRTOFvizqVFElFgrIo4eh8svseed3+xwX98oeC+O0u +WrLW42RpVWIoai+V6OdYzO+uALr8z0IC3yzw1pqLMVt+SLY0nS/qz8b3eruU4BmL +6i0BQvJF8QFCE+gmtZJSQyCXzOw5jcvlDAm10v/1QQKBgQD+eg13guElgrjvkjT6 +a1ry3hA5LfQ7QyqigydyZlkbPfBcnv0dsWXbL+toyu0RqMwcKBGG3/7bVpGx/0gb +EgtnDWgS/PyWpuW+scwhqryzCKLnbUCor3GIo7CkM+71m5BcOHNTUPoqrMt6/W6a +mQ4z+VNpVQlRrx3YXp2zbO/dgwKBgQDk8vqmioezNNNhnk/7LhFITN7TavsVT3LT +jgs9CTSwEKFLC8eo/sKPaTKTcPxhxrt9eOWhYbNNQ9WYzy/ulA/vKvfkZjvtUaHl +sC35E9FcMW6lSiM8LOQlPnJYq6VqIQOdTgtBp1lsmQvkRbAF7Wq2Fy8BjiwPJfV1 +CJejygI0EQKBgCZs96ucL7MiUhqa0TUfENSrg3ee4MoyEjYH5+T2X24lpC3YNBBP +wTmfusRQIAwSmP+HbV4YZLtqDwX5rkGoL+CXvadgXCPDf92Tq2dKCMRgAXlAngra +syIW1Y116hdcLig+vetOxve6r98adaESi3p9o4K8PHQBJViOsPFu+alRAoGAUtb8 +DIB5Y0VM6rheljLv++ocggDmgqpxkMyHknkfQEl0IvRLNQGhIkTdEO5D05kVw+uX +otH4D4/o3FazMC8QqOgyM8kuC8uKudIKgGJEUYhtUY9GuoI/tp4mv6CzxHfXl/Zi +KkpEGAA0hk8UxsBF6UbwMi7gEEcazlLik1gHfhECgYEA0HyiW3LaWfACD2y7gpYu +GDvF+Oo28tK2QBkPBa1FNtZ4BKBquGqe8V6iNuQ5HsZFwycFum/VU0FWoDOyLS5M +vclQT0fojPxlLTjS0B2PRBEv52cNNFwMrj/I/DerqdDh3saUIe1MeJoVIMDTE/Jl +H5v0FaUgorY35MZdyQ54uw0= +-----END PRIVATE KEY----- diff --git a/tests/data/generate-crts/ca.srl b/tests/data/generate-crts/ca.srl new file mode 100644 index 00000000..63d9a189 --- /dev/null +++ b/tests/data/generate-crts/ca.srl @@ -0,0 +1 @@ +51042C938C1D19FADAC29CA05FAE1A16D47AFF53 diff --git a/tests/data/generate-crts/generate-test-certs.sh b/tests/data/generate-crts/generate-test-certs.sh new file mode 100644 index 00000000..488f76d4 --- /dev/null +++ b/tests/data/generate-crts/generate-test-certs.sh @@ -0,0 +1,76 @@ +#!/bin/bash + +CA_KEY="ca.key" +CA_CERT="ca.crt" + +# Output paths for the database and storage certificates and keys +DATABASE_KEY="../database.key" +DATABASE_CSR="database.csr" +DATABASE_CERT="../database.crt" + +STORAGE_KEY="../storage.key" +STORAGE_CSR="storage.csr" +STORAGE_CERT="../storage.crt" + +generate_certificate() { + local KEY_PATH=$1 + local CSR_PATH=$2 + local CERT_PATH=$3 + local CONFIG_FILE=$4 + + openssl req -new -newkey rsa:2048 -nodes -keyout "$KEY_PATH" -out "$CSR_PATH" -config "$CONFIG_FILE" + openssl x509 -req -in "$CSR_PATH" -CA "$CA_CERT" -CAkey "$CA_KEY" -CAcreateserial -out "$CERT_PATH" -days 5475 -sha256 -extensions req_ext -extfile "$CONFIG_FILE" +} + +# Paths to .cnf files, where we will write certificate settings +DATABASE_CONFIG="database-csr.cnf" +STORAGE_CONFIG="storage-csr.cnf" + +cat > $DATABASE_CONFIG < $STORAGE_CONFIG <`) - - for scanner.Scan() { - line := scanner.Text() - - matches := portForwardRegex.FindStringSubmatch(line) - if matches != nil { - localPort, err = strconv.Atoi(matches[1]) - if err != nil { - return err - } - break - } - } - - if localPort != 0 { - if err = f(localPort); err != nil { - return err - } - } else { - content, _ := io.ReadAll(stderr) - - return fmt.Errorf("kubectl port-forward stderr: %s", content) - } - return nil - }, Timeout, test.Interval).Should(BeNil()) -} - func emptyStorageDefaultFields(storage *v1alpha1.Storage) { storage.Spec.Image = nil storage.Spec.Resources = nil @@ -295,7 +59,7 @@ var _ = Describe("Operator smoke test", func() { var databaseSample *v1alpha1.Database BeforeEach(func() { - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-config.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join("..", 
"data", "storage-mirror-3-dc-config.yaml")) databaseSample = testobjects.DefaultDatabase() ctx = context.Background() @@ -315,13 +79,13 @@ var _ = Describe("Operator smoke test", func() { } return false }, Timeout, Interval).Should(BeTrue()) - Expect(installOperatorWithHelm(testobjects.YdbNamespace)).Should(BeTrue()) + InstallLocalOperatorWithHelm(testobjects.YdbNamespace) }) It("Check webhook defaulter", func() { emptyStorageDefaultFields(storageSample) Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) emptyDatabaseDefaultFields(databaseSample) Expect(k8sClient.Create(ctx, databaseSample)).Should(Succeed()) @@ -331,7 +95,7 @@ var _ = Describe("Operator smoke test", func() { }) It("Check webhook defaulter with dynconfig and nodeSets", func() { - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-dynconfig.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join("..", "data", "storage-mirror-3-dc-dynconfig.yaml")) emptyStorageDefaultFields(storageSample) storageSample.Spec.NodeSets = []v1alpha1.StorageNodeSetSpecInline{ { @@ -344,29 +108,29 @@ var _ = Describe("Operator smoke test", func() { }, } Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) }) It("general smoke pipeline, create storage + database", func() { By("issuing create commands...") Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) Expect(k8sClient.Create(ctx, databaseSample)).Should(Succeed()) defer func() { Expect(k8sClient.Delete(ctx, databaseSample)).Should(Succeed()) }() By("waiting until Storage is ready...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) By("waiting until database is ready...") - waitUntilDatabaseReady(ctx, databaseSample.Name, testobjects.YdbNamespace) + WaitUntilDatabaseReady(ctx, k8sClient, databaseSample.Name, testobjects.YdbNamespace) By("checking that all the database pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) database := v1alpha1.Database{} Expect(k8sClient.Get(ctx, types.NamespacedName{ @@ -382,22 +146,23 @@ var _ = Describe("Operator smoke test", func() { podName := databasePods.Items[0].Name By("bring YDB CLI inside ydb database pod...") - bringYdbCliToPod(podName, testobjects.YdbNamespace) + BringYdbCliToPod(podName, testobjects.YdbNamespace) By("execute simple query inside ydb database pod...") - executeSimpleQuery(podName, testobjects.YdbNamespace, storageEndpoint) + databasePath := DatabasePathWithDefaultDomain(databaseSample) + ExecuteSimpleTableE2ETest(podName, testobjects.YdbNamespace, storageEndpoint, databasePath) }) It("pause and un-pause Storage, should destroy and bring up Pods", func() { By("issuing create commands...") 
Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) By("waiting until Storage is ready...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) By("setting storage pause to Paused...") storage := v1alpha1.Storage{} @@ -430,22 +195,22 @@ var _ = Describe("Operator smoke test", func() { Expect(k8sClient.Update(ctx, &storage)).Should(Succeed()) By("expecting storage to become ready again...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) }) It("freeze + delete StatefulSet + un-freeze Storage", func() { By("issuing create commands...") Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) By("waiting until Storage is ready...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) By("setting storage operatorSync to false...") storage := v1alpha1.Storage{} @@ -505,15 +270,15 @@ var _ = Describe("Operator smoke test", func() { Expect(k8sClient.Update(ctx, &storage)).Should(Succeed()) By("expecting storage to become ready again...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) }) It("create storage and database with nodeSets", func() { By("issuing create commands...") - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-config.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join("..", "data", "storage-mirror-3-dc-config.yaml")) testNodeSetName := "nodeset" for idx := 1; idx <= 3; idx++ { storageSample.Spec.NodeSets = append(storageSample.Spec.NodeSets, v1alpha1.StorageNodeSetSpecInline{ @@ -530,23 +295,23 @@ var _ = Describe("Operator smoke test", func() { }) } Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) Expect(k8sClient.Create(ctx, databaseSample)).Should(Succeed()) defer func() { Expect(k8sClient.Delete(ctx, databaseSample)).Should(Succeed()) }() 
By("waiting until Storage is ready...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) By("waiting until database is ready...") - waitUntilDatabaseReady(ctx, databaseSample.Name, testobjects.YdbNamespace) + WaitUntilDatabaseReady(ctx, k8sClient, databaseSample.Name, testobjects.YdbNamespace) By("checking that all the database pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) database := v1alpha1.Database{} databasePods := corev1.PodList{} @@ -594,36 +359,37 @@ var _ = Describe("Operator smoke test", func() { podName := databasePods.Items[0].Name By("bring YDB CLI inside ydb database pod...") - bringYdbCliToPod(podName, testobjects.YdbNamespace) + BringYdbCliToPod(podName, testobjects.YdbNamespace) By("execute simple query inside ydb database pod...") - executeSimpleQuery(podName, testobjects.YdbNamespace, storageEndpoint) + databasePath := DatabasePathWithDefaultDomain(databaseSample) + ExecuteSimpleTableE2ETest(podName, testobjects.YdbNamespace, storageEndpoint, databasePath) }) It("operatorConnection check, create storage with default staticCredentials", func() { By("issuing create commands...") - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-config-staticCreds.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join("..", "data", "storage-mirror-3-dc-config-staticCreds.yaml")) storageSample.Spec.OperatorConnection = &v1alpha1.ConnectionOptions{ StaticCredentials: &v1alpha1.StaticCredentialsAuth{ Username: "root", }, } Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) By("waiting until Storage is ready...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) }) It("storage.State goes Pending -> Preparing -> Initializing -> Provisioning -> Ready", func() { Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) By("waiting until Storage is ready...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("tracking storage state changes...") events, err := clientset.CoreV1().Events(testobjects.YdbNamespace).List(context.Background(), @@ -647,101 +413,89 @@ var _ = Describe("Operator smoke test", func() { }) It("using grpcs for storage connection", func() { - By("create secret...") - cert := testobjects.DefaultCertificate( - filepath.Join(".", "data", "tls.crt"), - filepath.Join(".", "data", "tls.key"), - 
filepath.Join(".", "data", "ca.crt"), - ) - Expect(k8sClient.Create(ctx, cert)).Should(Succeed()) + By("create storage certificate secret...") + storageCert := testobjects.StorageCertificate() + Expect(k8sClient.Create(ctx, storageCert)).Should(Succeed()) defer func() { - Expect(k8sClient.Delete(ctx, cert)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, storageCert)).Should(Succeed()) + }() + By("create database certificate secret...") + databaseCert := testobjects.DatabaseCertificate() + Expect(k8sClient.Create(ctx, databaseCert)).Should(Succeed()) + defer func() { + Expect(k8sClient.Delete(ctx, databaseCert)).Should(Succeed()) }() By("create storage...") - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-config-tls.yaml")) - storageSample.Spec.Service.GRPC.TLSConfiguration.Enabled = true - storageSample.Spec.Service.GRPC.TLSConfiguration.Certificate = corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: testobjects.CertificateSecretName}, - Key: "tls.crt", - } - storageSample.Spec.Service.GRPC.TLSConfiguration.Key = corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: testobjects.CertificateSecretName}, - Key: "tls.key", - } - storageSample.Spec.Service.GRPC.TLSConfiguration.CertificateAuthority = corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: testobjects.CertificateSecretName}, - Key: "ca.crt", - } + storageSample = testobjects.DefaultStorage(filepath.Join("..", "data", "storage-mirror-3-dc-config-tls.yaml")) + storageSample.Spec.Service.GRPC.TLSConfiguration = testobjects.TLSConfiguration( + testobjects.StorageCertificateSecretName, + ) Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) By("waiting until Storage is ready...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) By("create database...") - databaseSample.Spec.Service.GRPC.TLSConfiguration.Enabled = true - databaseSample.Spec.Service.GRPC.TLSConfiguration.Certificate = corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: testobjects.CertificateSecretName}, - Key: "tls.crt", - } - databaseSample.Spec.Service.GRPC.TLSConfiguration.Key = corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: testobjects.CertificateSecretName}, - Key: "tls.key", - } - databaseSample.Spec.Service.GRPC.TLSConfiguration.CertificateAuthority = corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: testobjects.CertificateSecretName}, - Key: "ca.crt", - } + databaseSample.Spec.Service.GRPC.TLSConfiguration = testobjects.TLSConfiguration( + testobjects.DatabaseCertificateSecretName, + ) Expect(k8sClient.Create(ctx, databaseSample)).Should(Succeed()) defer func() { Expect(k8sClient.Delete(ctx, databaseSample)).Should(Succeed()) }() By("waiting until database is ready...") - waitUntilDatabaseReady(ctx, databaseSample.Name, testobjects.YdbNamespace) + WaitUntilDatabaseReady(ctx, k8sClient, databaseSample.Name, testobjects.YdbNamespace) By("checking 
that all the database pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) - storagePods := corev1.PodList{} - Expect(k8sClient.List(ctx, &storagePods, + database := v1alpha1.Database{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: databaseSample.Name, + Namespace: testobjects.YdbNamespace, + }, &database)).Should(Succeed()) + storageEndpoint := database.Spec.StorageEndpoint + + databasePods := corev1.PodList{} + Expect(k8sClient.List(ctx, &databasePods, client.InNamespace(testobjects.YdbNamespace), - client.MatchingLabels{ - "ydb-cluster": "kind-database", - })).Should(Succeed()) - podName := storagePods.Items[0].Name + client.MatchingLabels{"ydb-cluster": "kind-database"}), + ).Should(Succeed()) + podName := databasePods.Items[0].Name - By("bring YDB CLI inside ydb storage pod...") - bringYdbCliToPod(podName, testobjects.YdbNamespace) + By("bring YDB CLI inside ydb database pod...") + BringYdbCliToPod(podName, testobjects.YdbNamespace) - By("execute simple query inside ydb storage pod...") - storageEndpoint := fmt.Sprintf("grpcs://%s:%d", testobjects.StorageGRPCService, testobjects.StorageGRPCPort) - executeSimpleQuery(podName, testobjects.YdbNamespace, storageEndpoint) + By("execute simple query inside ydb database pod...") + databasePath := DatabasePathWithDefaultDomain(databaseSample) + ExecuteSimpleTableE2ETest(podName, testobjects.YdbNamespace, storageEndpoint, databasePath) }) It("Check that Storage deleted after Database...", func() { By("create storage...") Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) By("create database...") Expect(k8sClient.Create(ctx, databaseSample)).Should(Succeed()) By("waiting until Storage is ready...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) By("waiting until Database is ready...") - waitUntilDatabaseReady(ctx, databaseSample.Name, testobjects.YdbNamespace) + WaitUntilDatabaseReady(ctx, k8sClient, databaseSample.Name, testobjects.YdbNamespace) By("checking that all the database pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) By("delete Storage...") Expect(k8sClient.Delete(ctx, storageSample)).Should(Succeed()) @@ -785,10 +539,10 @@ var _ = Describe("Operator smoke test", func() { It("check storage with dynconfig", func() { By("create storage...") - storageSample = testobjects.DefaultStorage(filepath.Join(".", "data", "storage-mirror-3-dc-dynconfig.yaml")) + storageSample = testobjects.DefaultStorage(filepath.Join("..", "data", "storage-mirror-3-dc-dynconfig.yaml")) Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) storage := v1alpha1.Storage{} By("waiting until StorageInitialized condition 
is true...") @@ -838,9 +592,9 @@ var _ = Describe("Operator smoke test", func() { }) It("TLS for status service", func() { - tlsHTTPCheck := func(port int) error { + tlsHTTPCheck := func(port int, serverName string) error { url := fmt.Sprintf("https://localhost:%d/", port) - cert, err := os.ReadFile(filepath.Join(".", "data", "ca.crt")) + cert, err := os.ReadFile(testobjects.TestCAPath) Expect(err).ShouldNot(HaveOccurred()) certPool := x509.NewCertPool() @@ -850,7 +604,7 @@ var _ = Describe("Operator smoke test", func() { tlsConfig := &tls.Config{ MinVersion: tls.VersionTLS12, RootCAs: certPool, - ServerName: "storage-grpc.ydb.svc.cluster.local", + ServerName: serverName, } transport := &http.Transport{TLSClientConfig: tlsConfig} @@ -877,70 +631,65 @@ var _ = Describe("Operator smoke test", func() { return nil } - By("create secret...") - cert := testobjects.DefaultCertificate( - filepath.Join(".", "data", "tls.crt"), - filepath.Join(".", "data", "tls.key"), - filepath.Join(".", "data", "ca.crt"), - ) - Expect(k8sClient.Create(ctx, cert)).Should(Succeed()) + By("create storage certificate secret...") + storageCert := testobjects.StorageCertificate() + Expect(k8sClient.Create(ctx, storageCert)).Should(Succeed()) + defer func() { + Expect(k8sClient.Delete(ctx, storageCert)).Should(Succeed()) + }() + By("create database certificate secret...") + databaseCert := testobjects.DatabaseCertificate() + Expect(k8sClient.Create(ctx, databaseCert)).Should(Succeed()) + defer func() { + Expect(k8sClient.Delete(ctx, databaseCert)).Should(Succeed()) + }() By("create storage...") - storageSample.Spec.Service.Status.TLSConfiguration = &v1alpha1.TLSConfiguration{ - Enabled: true, - Certificate: corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: testobjects.CertificateSecretName}, - Key: "tls.crt", - }, - Key: corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: testobjects.CertificateSecretName}, - Key: "tls.key", - }, - CertificateAuthority: corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{Name: testobjects.CertificateSecretName}, - Key: "ca.crt", - }, - } + storageSample.Spec.Service.Status.TLSConfiguration = testobjects.TLSConfiguration( + testobjects.StorageCertificateSecretName, + ) Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) By("create database...") databaseSample.Spec.Nodes = 1 - databaseSample.Spec.Service.Status = *storageSample.Spec.Service.Status.DeepCopy() + databaseSample.Spec.Service.Status.TLSConfiguration = testobjects.TLSConfiguration( + testobjects.DatabaseCertificateSecretName, + ) Expect(k8sClient.Create(ctx, databaseSample)).Should(Succeed()) defer func() { Expect(k8sClient.Delete(ctx, databaseSample)).Should(Succeed()) }() By("waiting until Storage is ready...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) By("forward storage status port and check that we can check TLS response") - portForward(ctx, + PortForward(ctx, fmt.Sprintf(resources.StatusServiceNameFormat, storageSample.Name), storageSample.Namespace, 
- v1alpha1.StatusPort, tlsHTTPCheck, + "storage-grpc.ydb.svc.cluster.local", v1alpha1.StatusPort, tlsHTTPCheck, ) By("waiting until database is ready...") - waitUntilDatabaseReady(ctx, databaseSample.Name, testobjects.YdbNamespace) + WaitUntilDatabaseReady(ctx, k8sClient, databaseSample.Name, testobjects.YdbNamespace) By("checking that all the database pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) By("forward database status port and check that we can check TLS response") - portForward(ctx, + PortForward(ctx, fmt.Sprintf(resources.StatusServiceNameFormat, databaseSample.Name), databaseSample.Namespace, - v1alpha1.StatusPort, tlsHTTPCheck, + "database-grpc.ydb.svc.cluster.local", v1alpha1.StatusPort, tlsHTTPCheck, ) }) It("Check encryption for Database", func() { By("create storage...") Expect(k8sClient.Create(ctx, storageSample)).Should(Succeed()) - defer deleteStorageSafely(ctx, storageSample) + defer DeleteStorageSafely(ctx, k8sClient, storageSample) By("create database...") databaseSample.Spec.Encryption = &v1alpha1.EncryptionConfig{ Enabled: true, @@ -951,16 +700,16 @@ var _ = Describe("Operator smoke test", func() { }() By("waiting until Storage is ready...") - waitUntilStorageReady(ctx, storageSample.Name, testobjects.YdbNamespace) + WaitUntilStorageReady(ctx, k8sClient, storageSample.Name, testobjects.YdbNamespace) By("checking that all the storage pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storageSample.Spec.Nodes) By("waiting until database is ready...") - waitUntilDatabaseReady(ctx, databaseSample.Name, testobjects.YdbNamespace) + WaitUntilDatabaseReady(ctx, k8sClient, databaseSample.Name, testobjects.YdbNamespace) By("checking that all the database pods are running and ready...") - checkPodsRunningAndReady(ctx, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) + CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-database", databaseSample.Spec.Nodes) database := v1alpha1.Database{} Expect(k8sClient.Get(ctx, types.NamespacedName{ @@ -977,14 +726,15 @@ var _ = Describe("Operator smoke test", func() { podName := databasePods.Items[0].Name By("bring YDB CLI inside ydb database pod...") - bringYdbCliToPod(podName, testobjects.YdbNamespace) + BringYdbCliToPod(podName, testobjects.YdbNamespace) By("execute simple query inside ydb database pod...") - executeSimpleQuery(podName, testobjects.YdbNamespace, storageEndpoint) + databasePath := DatabasePathWithDefaultDomain(databaseSample) + ExecuteSimpleTableE2ETest(podName, testobjects.YdbNamespace, storageEndpoint, databasePath) }) AfterEach(func() { - Expect(uninstallOperatorWithHelm(testobjects.YdbNamespace)).Should(BeTrue()) + UninstallOperatorWithHelm(testobjects.YdbNamespace) Expect(k8sClient.Delete(ctx, &namespace)).Should(Succeed()) Eventually(func(g Gomega) bool { namespaceList := corev1.NamespaceList{} diff --git a/e2e/tests/test-objects/objects.go b/tests/test-k8s-objects/objects.go similarity index 71% rename from e2e/tests/test-objects/objects.go rename to tests/test-k8s-objects/objects.go index c369d4cc..447f6c49 100644 --- a/e2e/tests/test-objects/objects.go +++ b/tests/test-k8s-objects/objects.go @@ -2,8 +2,9 @@ package testobjects import ( "os" + "path/filepath" - . 
"github.com/onsi/gomega" //nolint:all + . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -11,15 +12,26 @@ import ( ) const ( - YdbImage = "cr.yandex/crptqonuodf51kdj7a7d/ydb:24.2.7" // anchor_for_fetching_image_from_workflow - YdbNamespace = "ydb" - StorageName = "storage" - DatabaseName = "database" - CertificateSecretName = "storage-crt" - DefaultDomain = "Root" - ReadyStatus = "Ready" - StorageGRPCService = "storage-grpc.ydb.svc.cluster.local" - StorageGRPCPort = 2135 + YdbImage = "cr.yandex/crptqonuodf51kdj7a7d/ydb:24.2.7" // anchor_for_fetching_image_from_workflow + YdbNamespace = "ydb" + StorageName = "storage" + DatabaseName = "database" + StorageCertificateSecretName = "storage-crt" + DatabaseCertificateSecretName = "database-crt" + DefaultDomain = "Root" + ReadyStatus = "Ready" + StorageGRPCService = "storage-grpc.ydb.svc.cluster.local" + StorageGRPCPort = 2135 +) + +var ( + TestCAPath = filepath.Join("..", "data", "generate-crts", "ca.crt") + + StorageTLSKeyPath = filepath.Join("..", "data", "storage.key") + StorageTLSCrtPath = filepath.Join("..", "data", "storage.crt") + + DatabaseTLSKeyPath = filepath.Join("..", "data", "database.key") + DatabaseTLSCrtPath = filepath.Join("..", "data", "database.crt") ) func constructAntiAffinityFor(key, value string) *corev1.Affinity { @@ -172,7 +184,25 @@ func DefaultDatabase() *v1alpha1.Database { } } -func DefaultCertificate(certPath, keyPath, caPath string) *corev1.Secret { +func StorageCertificate() *corev1.Secret { + return DefaultCertificate( + StorageCertificateSecretName, + StorageTLSCrtPath, + StorageTLSKeyPath, + TestCAPath, + ) +} + +func DatabaseCertificate() *corev1.Secret { + return DefaultCertificate( + DatabaseCertificateSecretName, + DatabaseTLSCrtPath, + DatabaseTLSKeyPath, + TestCAPath, + ) +} + +func DefaultCertificate(secretName, certPath, keyPath, caPath string) *corev1.Secret { cert, err := os.ReadFile(certPath) Expect(err).To(BeNil()) key, err := os.ReadFile(keyPath) @@ -182,7 +212,7 @@ func DefaultCertificate(certPath, keyPath, caPath string) *corev1.Secret { return &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: CertificateSecretName, + Name: secretName, Namespace: YdbNamespace, }, Type: corev1.SecretTypeOpaque, @@ -193,3 +223,21 @@ func DefaultCertificate(certPath, keyPath, caPath string) *corev1.Secret { }, } } + +func TLSConfiguration(secretName string) *v1alpha1.TLSConfiguration { + return &v1alpha1.TLSConfiguration{ + Enabled: true, + Certificate: corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: "tls.crt", + }, + Key: corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: "tls.key", + }, + CertificateAuthority: corev1.SecretKeySelector{ + LocalObjectReference: corev1.LocalObjectReference{Name: secretName}, + Key: "ca.crt", + }, + } +} diff --git a/tests/test-utils/test-utils.go b/tests/test-utils/test-utils.go new file mode 100644 index 00000000..43cb86ed --- /dev/null +++ b/tests/test-utils/test-utils.go @@ -0,0 +1,368 @@ +package testutils + +import ( + "bufio" + "context" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + "strings" + "time" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + meta "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + . 
"github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + v1alpha1 "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1" + . "github.com/ydb-platform/ydb-kubernetes-operator/internal/controllers/constants" + testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects" +) + +const ( + ConsistentConditionTimeout = time.Second * 30 + Timeout = time.Second * 600 + Interval = time.Second * 2 + YdbOperatorRemoteChart = "ydb/ydb-operator" + YdbOperatorReleaseName = "ydb-operator" +) + +var ( + pathToHelmValuesInLocalInstall = filepath.Join("..", "cfg", "operator-local-values.yaml") + pathToHelmValuesInRemoteInstall = filepath.Join("..", "cfg", "operator-values.yaml") +) + +func InstallLocalOperatorWithHelm(namespace string) { + args := []string{ + "-n", namespace, + "install", + "--wait", + "ydb-operator", + filepath.Join("..", "..", "deploy", "ydb-operator"), + "-f", pathToHelmValuesInLocalInstall, + } + + result := exec.Command("helm", args...) + stdout, err := result.Output() + Expect(err).To(BeNil()) + Expect(stdout).To(ContainSubstring("deployed")) +} + +func InstallOperatorWithHelm(namespace, version string) { + args := []string{ + "-n", namespace, + "install", + "--wait", + "ydb-operator", + YdbOperatorRemoteChart, + "-f", pathToHelmValuesInRemoteInstall, + "--version", version, + } + + Expect(exec.Command("helm", "repo", "add", "ydb", "https://charts.ydb.tech/").Run()).To(Succeed()) + Expect(exec.Command("helm", "repo", "update").Run()).To(Succeed()) + + installCommand := exec.Command("helm", args...) + output, err := installCommand.CombinedOutput() + Expect(err).To(BeNil()) + Expect(string(output)).To(ContainSubstring("deployed")) +} + +func UninstallOperatorWithHelm(namespace string) { + args := []string{ + "-n", namespace, + "uninstall", + "--wait", + "ydb-operator", + } + result := exec.Command("helm", args...) + stdout, err := result.Output() + Expect(err).To(BeNil()) + Expect(stdout).To(ContainSubstring("uninstalled")) +} + +func UpgradeOperatorWithHelm(namespace, version string) { + args := []string{ + "-n", namespace, + "upgrade", + "--wait", + "ydb-operator", + YdbOperatorRemoteChart, + "--version", version, + "-f", pathToHelmValuesInLocalInstall, + } + + cmd := exec.Command("helm", args...) 
+ cmd.Stdout = GinkgoWriter + cmd.Stderr = GinkgoWriter + + Expect(cmd.Run()).Should(Succeed()) +} + +func WaitUntilStorageReady(ctx context.Context, k8sClient client.Client, storageName, namespace string) { + Eventually(func() bool { + storage := &v1alpha1.Storage{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: storageName, + Namespace: namespace, + }, storage) + if err != nil { + return false + } + + return meta.IsStatusConditionPresentAndEqual( + storage.Status.Conditions, + StorageInitializedCondition, + metav1.ConditionTrue, + ) && storage.Status.State == testobjects.ReadyStatus + }, Timeout, Interval).Should(BeTrue()) +} + +func WaitUntilDatabaseReady(ctx context.Context, k8sClient client.Client, databaseName, namespace string) { + Eventually(func() bool { + database := &v1alpha1.Database{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: databaseName, + Namespace: namespace, + }, database) + if err != nil { + return false + } + + return meta.IsStatusConditionPresentAndEqual( + database.Status.Conditions, + DatabaseInitializedCondition, + metav1.ConditionTrue, + ) && database.Status.State == testobjects.ReadyStatus + }, Timeout, Interval).Should(BeTrue()) +} + +func CheckPodsRunningAndReady(ctx context.Context, k8sClient client.Client, podLabelKey, podLabelValue string, nPods int32) { + Eventually(func(g Gomega) bool { + pods := corev1.PodList{} + g.Expect(k8sClient.List(ctx, &pods, client.InNamespace(testobjects.YdbNamespace), client.MatchingLabels{ + podLabelKey: podLabelValue, + })).Should(Succeed()) + g.Expect(len(pods.Items)).Should(BeEquivalentTo(nPods)) + for _, pod := range pods.Items { + g.Expect(pod.Status.Phase).Should(BeEquivalentTo("Running")) + g.Expect(podIsReady(pod.Status.Conditions)).Should(BeTrue()) + } + return true + }, Timeout, Interval).Should(BeTrue()) + + Consistently(func(g Gomega) bool { + pods := corev1.PodList{} + g.Expect(k8sClient.List(ctx, &pods, client.InNamespace(testobjects.YdbNamespace), client.MatchingLabels{ + podLabelKey: podLabelValue, + })).Should(Succeed()) + g.Expect(len(pods.Items)).Should(BeEquivalentTo(nPods)) + for _, pod := range pods.Items { + g.Expect(pod.Status.Phase).Should(BeEquivalentTo("Running")) + g.Expect(podIsReady(pod.Status.Conditions)).Should(BeTrue()) + } + return true + }, ConsistentConditionTimeout, Interval).Should(BeTrue()) +} + +func podIsReady(conditions []corev1.PodCondition) bool { + for _, condition := range conditions { + if condition.Type == corev1.PodReady && condition.Status == corev1.ConditionTrue { + return true + } + } + return false +} + +func BringYdbCliToPod(podName, podNamespace string) { + expectedCliLocation := fmt.Sprintf("%v/ydb/bin/ydb", os.ExpandEnv("$HOME")) + + _, err := os.Stat(expectedCliLocation) + Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Expected YDB CLI at path %s to exist", expectedCliLocation)) + + Eventually(func(g Gomega) error { + args := []string{ + "-n", + podNamespace, + "cp", + expectedCliLocation, + fmt.Sprintf("%v:/tmp/ydb", podName), + } + cmd := exec.Command("kubectl", args...) 
+		return cmd.Run()
+	}, Timeout, Interval).Should(BeNil())
+}
+
+func ExecuteSimpleTableE2ETest(podName, podNamespace, storageEndpoint, databasePath string) {
+	tablePath := "testfolder/testtable"
+
+	tableCreatingInterval := time.Second * 10
+
+	// Retry table creation until it succeeds: the database may still be
+	// warming up right after it reports Ready.
+	Eventually(func(g Gomega) {
+		args := []string{
+			"-n", podNamespace,
+			"exec", podName,
+			"--",
+			"/tmp/ydb",
+			"-d", databasePath,
+			"-e", storageEndpoint,
+			"yql",
+			"-s",
+			fmt.Sprintf("CREATE TABLE `%s` (testColumnA Utf8, testColumnB Utf8, PRIMARY KEY (testColumnA));", tablePath),
+		}
+		output, err := exec.Command("kubectl", args...).CombinedOutput()
+		fmt.Fprintln(GinkgoWriter, string(output))
+		g.Expect(err).ShouldNot(HaveOccurred(), string(output))
+	}, Timeout, tableCreatingInterval).Should(Succeed())
+
+	argsInsert := []string{
+		"-n", podNamespace,
+		"exec", podName,
+		"--",
+		"/tmp/ydb",
+		"-d", databasePath,
+		"-e", storageEndpoint,
+		"yql",
+		"-s",
+		fmt.Sprintf("INSERT INTO `%s` (testColumnA, testColumnB) VALUES ('valueA', 'valueB');", tablePath),
+	}
+	output, err := exec.Command("kubectl", argsInsert...).CombinedOutput()
+	Expect(err).ShouldNot(HaveOccurred(), string(output))
+
+	argsSelect := []string{
+		"-n", podNamespace,
+		"exec", podName,
+		"--",
+		"/tmp/ydb",
+		"-d", databasePath,
+		"-e", storageEndpoint,
+		"yql",
+		"--format", "csv",
+		"-s",
+		fmt.Sprintf("SELECT * FROM `%s`;", tablePath),
+	}
+	output, err = exec.Command("kubectl", argsSelect...).CombinedOutput()
+	Expect(err).ShouldNot(HaveOccurred(), string(output))
+	Expect(strings.TrimSpace(string(output))).To(ContainSubstring("\"valueA\",\"valueB\""))
+
+	argsDrop := []string{
+		"-n", podNamespace,
+		"exec", podName,
+		"--",
+		"/tmp/ydb",
+		"-d", databasePath,
+		"-e", storageEndpoint,
+		"yql",
+		"-s",
+		fmt.Sprintf("DROP TABLE `%s`;", tablePath),
+	}
+	output, err = exec.Command("kubectl", argsDrop...).CombinedOutput()
+	Expect(err).ShouldNot(HaveOccurred(), string(output))
+}
+
+func PortForward(
+	ctx context.Context,
+	svcName, svcNamespace, serverName string,
+	port int,
+	f func(int, string) error,
+) {
+	Eventually(func(g Gomega) error {
+		args := []string{
+			"-n", svcNamespace,
+			"port-forward",
+			fmt.Sprintf("svc/%s", svcName),
+			fmt.Sprintf(":%d", port),
+		}
+
+		cmd := exec.CommandContext(ctx, "kubectl", args...)
+		stdout, err := cmd.StdoutPipe()
+		if err != nil {
+			return err
+		}
+
+		stderr, err := cmd.StderrPipe()
+		if err != nil {
+			return err
+		}
+
+		if err = cmd.Start(); err != nil {
+			return err
+		}
+
+		defer func() {
+			err := cmd.Process.Kill()
+			if err != nil {
+				_, _ = fmt.Fprintf(GinkgoWriter, "Unable to kill process: %s", err)
+			}
+		}()
+
+		localPort := 0
+
+		scanner := bufio.NewScanner(stdout)
+		portForwardRegex := regexp.MustCompile(`Forwarding from 127.0.0.1:(\d+) ->`)
+
+		for scanner.Scan() {
+			line := scanner.Text()
+
+			matches := portForwardRegex.FindStringSubmatch(line)
+			if matches != nil {
+				localPort, err = strconv.Atoi(matches[1])
+				if err != nil {
+					return err
+				}
+				break
+			}
+		}
+
+		if localPort != 0 {
+			if err = f(localPort, serverName); err != nil {
+				return err
+			}
+		} else {
+			content, _ := io.ReadAll(stderr)
+			return fmt.Errorf("kubectl port-forward stderr: %s", content)
+		}
+		return nil
+	}, Timeout, Interval).Should(BeNil())
+}
+
+func DeleteStorageSafely(ctx context.Context, k8sClient client.Client, storage *v1alpha1.Storage) {
+	// Deletion is best-effort here: some tests delete the storage themselves,
+	// so it may already be gone.
+	_ = k8sClient.Delete(ctx, storage)
+
+	Eventually(func() bool {
+		fetched := v1alpha1.Storage{}
+		err := k8sClient.Get(ctx, types.NamespacedName{
+			Name:      storage.Name,
+			Namespace: testobjects.YdbNamespace,
+		}, &fetched)
+		return apierrors.IsNotFound(err)
+	}, Timeout, Interval).Should(BeTrue())
+}
+
+func DeleteDatabase(ctx context.Context, k8sClient client.Client, database *v1alpha1.Database) {
+	Expect(k8sClient.Delete(ctx, database)).To(Succeed())
+
+	Eventually(func() bool {
+		fetched := v1alpha1.Database{}
+		err := k8sClient.Get(ctx, types.NamespacedName{
+			Name:      database.Name,
+			Namespace: testobjects.YdbNamespace,
+		}, &fetched)
+		return apierrors.IsNotFound(err)
+	}, Timeout, Interval).Should(BeTrue())
+}
+
+func DatabasePathWithDefaultDomain(database *v1alpha1.Database) string {
+	return fmt.Sprintf("/%s/%s", testobjects.DefaultDomain, database.Name)
+}
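
Note for reviewers: the helpers above are exported from `tests/test-utils` precisely so that both the e2e and compatibility suites can share them. The sketch below shows how they compose into one round-trip spec. It is illustrative only, not part of this change: it assumes the suite-level `ctx` and `k8sClient` wiring that the e2e suite already sets up, and the spec name and local variable names are ours.

```go
package compatibility_test

import (
	"context"
	"path/filepath"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	v1alpha1 "github.com/ydb-platform/ydb-kubernetes-operator/api/v1alpha1"
	testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects"
	. "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-utils"
)

// Assumed to be initialized by the suite setup, as in the e2e tests.
var (
	ctx       context.Context
	k8sClient client.Client
)

var _ = It("storage + database round trip (illustrative)", func() {
	storage := testobjects.DefaultStorage(filepath.Join("..", "data", "storage-mirror-3-dc-config.yaml"))
	database := testobjects.DefaultDatabase()

	Expect(k8sClient.Create(ctx, storage)).Should(Succeed())
	defer DeleteStorageSafely(ctx, k8sClient, storage)
	Expect(k8sClient.Create(ctx, database)).Should(Succeed())
	defer DeleteDatabase(ctx, k8sClient, database)

	WaitUntilStorageReady(ctx, k8sClient, storage.Name, testobjects.YdbNamespace)
	CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-storage", storage.Spec.Nodes)
	WaitUntilDatabaseReady(ctx, k8sClient, database.Name, testobjects.YdbNamespace)
	CheckPodsRunningAndReady(ctx, k8sClient, "ydb-cluster", "kind-database", database.Spec.Nodes)

	// Read the Database back to learn the storage endpoint the operator wired in.
	fetched := v1alpha1.Database{}
	Expect(k8sClient.Get(ctx, types.NamespacedName{
		Name:      database.Name,
		Namespace: testobjects.YdbNamespace,
	}, &fetched)).Should(Succeed())

	pods := corev1.PodList{}
	Expect(k8sClient.List(ctx, &pods,
		client.InNamespace(testobjects.YdbNamespace),
		client.MatchingLabels{"ydb-cluster": "kind-database"},
	)).Should(Succeed())
	podName := pods.Items[0].Name

	// CREATE/INSERT/SELECT/DROP against /Root/<database> from inside the pod.
	BringYdbCliToPod(podName, testobjects.YdbNamespace)
	ExecuteSimpleTableE2ETest(podName, testobjects.YdbNamespace,
		fetched.Spec.StorageEndpoint, DatabasePathWithDefaultDomain(database))
})
```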
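Similarly, `PortForward` now threads a `serverName` through to its check callback (`f func(int, string) error`), so storage and database status services can each be verified against their own certificate. A sketch of a compatible callback, mirroring the smoke test's `tlsHTTPCheck`; the function name is ours, while the paths and constants come from the test objects in this change:

```go
import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"net/http"
	"os"

	testobjects "github.com/ydb-platform/ydb-kubernetes-operator/tests/test-k8s-objects"
)

// statusTLSCheck is an illustrative PortForward callback: it dials the
// forwarded status port over HTTPS and verifies the certificate chain
// against the test CA, using serverName for SNI/hostname verification.
func statusTLSCheck(port int, serverName string) error {
	caPEM, err := os.ReadFile(testobjects.TestCAPath)
	if err != nil {
		return err
	}

	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		return fmt.Errorf("failed to parse CA certificate at %s", testobjects.TestCAPath)
	}

	httpClient := &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				MinVersion: tls.VersionTLS12,
				RootCAs:    pool,
				ServerName: serverName, // must match a SAN in the per-service certificate
			},
		},
	}

	resp, err := httpClient.Get(fmt.Sprintf("https://localhost:%d/", port))
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.TLS == nil {
		return fmt.Errorf("expected a TLS connection, got plaintext")
	}
	return nil
}
```

Called the same way the smoke test does, e.g. `PortForward(ctx, fmt.Sprintf(resources.StatusServiceNameFormat, storage.Name), storage.Namespace, "storage-grpc.ydb.svc.cluster.local", v1alpha1.StatusPort, statusTLSCheck)`.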