From 532331aa7ab02c24df3803f3bf1e1f40a71ec470 Mon Sep 17 00:00:00 2001 From: Rajesh Tailor Date: Wed, 2 Jul 2025 19:33:22 +0530 Subject: [PATCH 1/2] Restructure nova-operator for multi-group support This commit restructures the nova-operator codebase to support multi-group operators by: - Moving API definitions from api/ to apis/nova/v1beta1/ - Reorganizing controllers into controllers/nova/ directory - Relocating test files to test/functional/nova/ and test/kuttl/nova/ - Updating all import paths and references throughout the codebase - Modifying build and configuration files to work with new structure This restructuring prepares the operator to support multiple API groups, which is required to merge placement-operator codebase into nova-operator Commit message assisted by: claude-4-sonnet Related: OSPRH-14957 --- .github/workflows/lints.yaml | 2 +- Makefile | 24 ++++++++-------- PROJECT | 21 ++++++++------ .../bases/nova.openstack.org_nova.yaml | 0 .../bases/nova.openstack.org_novaapis.yaml | 0 .../bases/nova.openstack.org_novacells.yaml | 0 .../nova.openstack.org_novacomputes.yaml | 0 .../nova.openstack.org_novaconductors.yaml | 0 .../nova.openstack.org_novametadata.yaml | 0 .../nova.openstack.org_novanovncproxies.yaml | 0 .../nova.openstack.org_novaschedulers.yaml | 0 {api => apis}/go.mod | 2 +- {api => apis}/go.sum | 0 {api => apis/nova}/v1beta1/common_types.go | 0 {api => apis/nova}/v1beta1/common_webhook.go | 0 {api => apis/nova}/v1beta1/conditions.go | 0 .../nova}/v1beta1/groupversion_info.go | 0 {api => apis/nova}/v1beta1/nova_types.go | 0 {api => apis/nova}/v1beta1/nova_webhook.go | 0 {api => apis/nova}/v1beta1/novaapi_types.go | 0 {api => apis/nova}/v1beta1/novaapi_webhook.go | 0 {api => apis/nova}/v1beta1/novacell_types.go | 0 .../nova}/v1beta1/novacell_webhook.go | 0 .../nova}/v1beta1/novacompute_types.go | 0 .../nova}/v1beta1/novacompute_webhook.go | 0 .../nova}/v1beta1/novaconductor_types.go | 0 .../nova}/v1beta1/novaconductor_webhook.go | 0 
.../nova}/v1beta1/novametadata_types.go | 0 .../nova}/v1beta1/novametadata_webhook.go | 0 .../nova}/v1beta1/novanovncproxy_types.go | 0 .../nova}/v1beta1/novanovncproxy_webhook.go | 0 .../nova}/v1beta1/novascheduler_types.go | 0 .../nova}/v1beta1/novascheduler_webhook.go | 0 .../nova}/v1beta1/zz_generated.deepcopy.go | 0 config/crd/kustomization.yaml | 28 +++++++++---------- ...ova.yaml => cainjection_in_nova_nova.yaml} | 0 ...yaml => cainjection_in_nova_novaapis.yaml} | 0 ...aml => cainjection_in_nova_novacells.yaml} | 0 ... => cainjection_in_nova_novacomputes.yaml} | 0 ...> cainjection_in_nova_novaconductors.yaml} | 0 ... => cainjection_in_nova_novametadata.yaml} | 0 ...cainjection_in_nova_novanovncproxies.yaml} | 0 ...> cainjection_in_nova_novaschedulers.yaml} | 0 ...in_nova.yaml => webhook_in_nova_nova.yaml} | 0 ...pis.yaml => webhook_in_nova_novaapis.yaml} | 0 ...ls.yaml => webhook_in_nova_novacells.yaml} | 0 ...yaml => webhook_in_nova_novacomputes.yaml} | 0 ...ml => webhook_in_nova_novaconductors.yaml} | 0 ...yaml => webhook_in_nova_novametadata.yaml} | 0 ... 
=> webhook_in_nova_novanovncproxies.yaml} | 0 ...ml => webhook_in_nova_novaschedulers.yaml} | 0 controllers/{ => nova}/common.go | 2 +- controllers/{ => nova}/nova_controller.go | 2 +- controllers/{ => nova}/novaapi_controller.go | 2 +- controllers/{ => nova}/novacell_controller.go | 2 +- .../{ => nova}/novacompute_controller.go | 2 +- .../{ => nova}/novaconductor_controller.go | 2 +- .../{ => nova}/novametadata_controller.go | 2 +- .../{ => nova}/novanovncproxy_controller.go | 2 +- .../{ => nova}/novascheduler_controller.go | 2 +- go.mod | 4 +-- main.go | 6 ++-- pkg/nova/celldelete.go | 2 +- pkg/nova/cellmapping.go | 2 +- pkg/nova/host_discover.go | 2 +- pkg/novaapi/deployment.go | 2 +- pkg/novacompute/deployment.go | 2 +- pkg/novaconductor/dbpurge.go | 2 +- pkg/novaconductor/dbsync.go | 2 +- pkg/novaconductor/deployment.go | 2 +- pkg/novametadata/deployment.go | 2 +- pkg/novascheduler/deployment.go | 2 +- pkg/novncproxy/deployment.go | 2 +- test/functional/{ => nova}/api_fixture.go | 0 test/functional/{ => nova}/base_test.go | 2 +- .../nova_compute_ironic_controller_test.go | 2 +- .../{ => nova}/nova_controller_test.go | 10 +++---- .../nova_metadata_controller_test.go | 2 +- .../{ => nova}/nova_multicell_test.go | 12 ++++---- .../{ => nova}/nova_novncproxy_test.go | 2 +- .../{ => nova}/nova_reconfiguration_test.go | 2 +- .../{ => nova}/nova_scheduler_test.go | 2 +- .../{ => nova}/novaapi_controller_test.go | 2 +- .../{ => nova}/novacell_controller_test.go | 2 +- .../novaconductor_controller_test.go | 2 +- test/functional/{ => nova}/sample_test.go | 2 +- test/functional/{ => nova}/suite_test.go | 12 ++++---- .../{ => nova}/validation_webhook_test.go | 0 .../default/cell-tests/00-cleanup-nova.yaml | 0 .../default/cell-tests/01-assert.yaml | 0 .../default/cell-tests/01-deploy.yaml | 0 .../default/cell-tests/02-assert.yaml | 0 .../cell-tests/02-delete-cell-nova.yaml | 0 .../default/cell-tests/03-cleanup-nova.yaml | 0 .../default/common/cleanup-nova.yaml | 0 
.../default/config-tests/00-cleanup-nova.yaml | 0 .../default/config-tests/01-assert.yaml | 0 ...-deploy-with-default-config-overwrite.yaml | 0 .../default/config-tests/02-assert.yaml | 0 .../config-tests/02-enable-notifications.yaml | 0 .../default/config-tests/03-cleanup-nova.yaml | 0 .../test-suites/default/config.yaml | 0 .../default/deps/OpenStackControlPlane.yaml | 0 .../test-suites/default/deps/infra.yaml | 0 .../test-suites/default/deps/keystone.yaml | 0 .../default/deps/kustomization.yaml | 0 .../test-suites/default/deps/namespace.yaml | 0 .../test-suites/default/deps/nova.yaml | 0 .../test-suites/default/deps/placement.yaml | 0 .../test-suites/default/output/.keep | 0 .../default/scale-tests/00-cleanup-nova.yaml | 0 .../default/scale-tests/01-assert.yaml | 0 .../default/scale-tests/01-deploy.yaml | 0 .../default/scale-tests/02-assert.yaml | 0 .../default/scale-tests/02-scale-up-nova.yaml | 0 .../default/scale-tests/03-assert.yaml | 0 .../scale-tests/03-scale-down-nova.yaml | 0 .../default/scale-tests/04-assert.yaml | 0 .../scale-tests/04-scale-down-zero-nova.yaml | 0 .../default/scale-tests/05-cleanup-nova.yaml | 0 120 files changed, 93 insertions(+), 88 deletions(-) rename {api => apis}/bases/nova.openstack.org_nova.yaml (100%) rename {api => apis}/bases/nova.openstack.org_novaapis.yaml (100%) rename {api => apis}/bases/nova.openstack.org_novacells.yaml (100%) rename {api => apis}/bases/nova.openstack.org_novacomputes.yaml (100%) rename {api => apis}/bases/nova.openstack.org_novaconductors.yaml (100%) rename {api => apis}/bases/nova.openstack.org_novametadata.yaml (100%) rename {api => apis}/bases/nova.openstack.org_novanovncproxies.yaml (100%) rename {api => apis}/bases/nova.openstack.org_novaschedulers.yaml (100%) rename {api => apis}/go.mod (98%) rename {api => apis}/go.sum (100%) rename {api => apis/nova}/v1beta1/common_types.go (100%) rename {api => apis/nova}/v1beta1/common_webhook.go (100%) rename {api => apis/nova}/v1beta1/conditions.go (100%) 
rename {api => apis/nova}/v1beta1/groupversion_info.go (100%) rename {api => apis/nova}/v1beta1/nova_types.go (100%) rename {api => apis/nova}/v1beta1/nova_webhook.go (100%) rename {api => apis/nova}/v1beta1/novaapi_types.go (100%) rename {api => apis/nova}/v1beta1/novaapi_webhook.go (100%) rename {api => apis/nova}/v1beta1/novacell_types.go (100%) rename {api => apis/nova}/v1beta1/novacell_webhook.go (100%) rename {api => apis/nova}/v1beta1/novacompute_types.go (100%) rename {api => apis/nova}/v1beta1/novacompute_webhook.go (100%) rename {api => apis/nova}/v1beta1/novaconductor_types.go (100%) rename {api => apis/nova}/v1beta1/novaconductor_webhook.go (100%) rename {api => apis/nova}/v1beta1/novametadata_types.go (100%) rename {api => apis/nova}/v1beta1/novametadata_webhook.go (100%) rename {api => apis/nova}/v1beta1/novanovncproxy_types.go (100%) rename {api => apis/nova}/v1beta1/novanovncproxy_webhook.go (100%) rename {api => apis/nova}/v1beta1/novascheduler_types.go (100%) rename {api => apis/nova}/v1beta1/novascheduler_webhook.go (100%) rename {api => apis/nova}/v1beta1/zz_generated.deepcopy.go (100%) rename config/crd/patches/{cainjection_in_nova.yaml => cainjection_in_nova_nova.yaml} (100%) rename config/crd/patches/{cainjection_in_novaapis.yaml => cainjection_in_nova_novaapis.yaml} (100%) rename config/crd/patches/{cainjection_in_novacells.yaml => cainjection_in_nova_novacells.yaml} (100%) rename config/crd/patches/{cainjection_in_novacomputes.yaml => cainjection_in_nova_novacomputes.yaml} (100%) rename config/crd/patches/{cainjection_in_novaconductors.yaml => cainjection_in_nova_novaconductors.yaml} (100%) rename config/crd/patches/{cainjection_in_novametadata.yaml => cainjection_in_nova_novametadata.yaml} (100%) rename config/crd/patches/{cainjection_in_novanovncproxies.yaml => cainjection_in_nova_novanovncproxies.yaml} (100%) rename config/crd/patches/{cainjection_in_novaschedulers.yaml => cainjection_in_nova_novaschedulers.yaml} (100%) rename 
config/crd/patches/{webhook_in_nova.yaml => webhook_in_nova_nova.yaml} (100%) rename config/crd/patches/{webhook_in_novaapis.yaml => webhook_in_nova_novaapis.yaml} (100%) rename config/crd/patches/{webhook_in_novacells.yaml => webhook_in_nova_novacells.yaml} (100%) rename config/crd/patches/{webhook_in_novacomputes.yaml => webhook_in_nova_novacomputes.yaml} (100%) rename config/crd/patches/{webhook_in_novaconductors.yaml => webhook_in_nova_novaconductors.yaml} (100%) rename config/crd/patches/{webhook_in_novametadata.yaml => webhook_in_nova_novametadata.yaml} (100%) rename config/crd/patches/{webhook_in_novanovncproxies.yaml => webhook_in_nova_novanovncproxies.yaml} (100%) rename config/crd/patches/{webhook_in_novaschedulers.yaml => webhook_in_nova_novaschedulers.yaml} (100%) rename controllers/{ => nova}/common.go (99%) rename controllers/{ => nova}/nova_controller.go (99%) rename controllers/{ => nova}/novaapi_controller.go (99%) rename controllers/{ => nova}/novacell_controller.go (99%) rename controllers/{ => nova}/novacompute_controller.go (99%) rename controllers/{ => nova}/novaconductor_controller.go (99%) rename controllers/{ => nova}/novametadata_controller.go (99%) rename controllers/{ => nova}/novanovncproxy_controller.go (99%) rename controllers/{ => nova}/novascheduler_controller.go (99%) rename test/functional/{ => nova}/api_fixture.go (100%) rename test/functional/{ => nova}/base_test.go (99%) rename test/functional/{ => nova}/nova_compute_ironic_controller_test.go (99%) rename test/functional/{ => nova}/nova_controller_test.go (99%) rename test/functional/{ => nova}/nova_metadata_controller_test.go (99%) rename test/functional/{ => nova}/nova_multicell_test.go (98%) rename test/functional/{ => nova}/nova_novncproxy_test.go (99%) rename test/functional/{ => nova}/nova_reconfiguration_test.go (99%) rename test/functional/{ => nova}/nova_scheduler_test.go (99%) rename test/functional/{ => nova}/novaapi_controller_test.go (99%) rename test/functional/{ 
=> nova}/novacell_controller_test.go (99%) rename test/functional/{ => nova}/novaconductor_controller_test.go (99%) rename test/functional/{ => nova}/sample_test.go (99%) rename test/functional/{ => nova}/suite_test.go (96%) rename test/functional/{ => nova}/validation_webhook_test.go (100%) rename test/kuttl/{ => nova}/test-suites/default/cell-tests/00-cleanup-nova.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/cell-tests/01-assert.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/cell-tests/01-deploy.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/cell-tests/02-assert.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/cell-tests/02-delete-cell-nova.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/cell-tests/03-cleanup-nova.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/common/cleanup-nova.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/config-tests/00-cleanup-nova.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/config-tests/01-assert.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/config-tests/01-deploy-with-default-config-overwrite.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/config-tests/02-assert.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/config-tests/02-enable-notifications.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/config-tests/03-cleanup-nova.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/config.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/deps/OpenStackControlPlane.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/deps/infra.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/deps/keystone.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/deps/kustomization.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/deps/namespace.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/deps/nova.yaml (100%) rename test/kuttl/{ 
=> nova}/test-suites/default/deps/placement.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/output/.keep (100%) rename test/kuttl/{ => nova}/test-suites/default/scale-tests/00-cleanup-nova.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/scale-tests/01-assert.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/scale-tests/01-deploy.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/scale-tests/02-assert.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/scale-tests/02-scale-up-nova.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/scale-tests/03-assert.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/scale-tests/03-scale-down-nova.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/scale-tests/04-assert.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/scale-tests/04-scale-down-zero-nova.yaml (100%) rename test/kuttl/{ => nova}/test-suites/default/scale-tests/05-cleanup-nova.yaml (100%) diff --git a/.github/workflows/lints.yaml b/.github/workflows/lints.yaml index 8a44aa52b..08a125575 100644 --- a/.github/workflows/lints.yaml +++ b/.github/workflows/lints.yaml @@ -11,4 +11,4 @@ jobs: uses: actions/checkout@v2 - name: check for replace lines in go.mod files run: | - ! egrep --invert-match -e '^replace.*/api => \./api|^replace.*//allow-merging$' `find . -name 'go.mod'` | egrep -e 'go.mod:replace' + ! egrep --invert-match -e '^replace.*/apis => \./apis|^replace.*//allow-merging$' `find . -name 'go.mod'` | egrep -e 'go.mod:replace' diff --git a/Makefile b/Makefile index 1bb5f22e3..ee7b8a4b2 100644 --- a/Makefile +++ b/Makefile @@ -113,7 +113,7 @@ docker-buildx: ## Build and push docker image for the manager for cross-platform .PHONY: manifests manifests: gowork controller-gen ## Generate WebhookConfiguration, ClusterRole and CustomResourceDefinition objects. $(CONTROLLER_GEN) rbac:roleName=manager-role crd webhook paths="./..." 
output:crd:artifacts:config=config/crd/bases && \ - rm -f api/bases/* && cp -a config/crd/bases api/ + rm -f apis/bases/* && cp -a config/crd/bases apis/ .PHONY: generate generate: controller-gen ## Generate code containing DeepCopy, DeepCopyInto, and DeepCopyObject method implementations. @@ -126,13 +126,13 @@ fmt: ## Run go fmt against code. .PHONY: vet vet: gowork ## Run go vet against code. go vet ./... - go vet ./api/... + go vet ./apis/... .PHONY: tidy tidy: ## Run go mod tidy on every mod file in the repo go mod tidy - cd ./api && go mod tidy + cd ./apis && go mod tidy .PHONY: golangci-lint golangci-lint: @@ -147,7 +147,7 @@ PROC_CMD = --procs ${PROCS} test: manifests generate fmt vet envtest ginkgo ## Run tests. KUBEBUILDER_ASSETS="$(shell $(ENVTEST) -v debug --bin-dir $(LOCALBIN) use $(ENVTEST_K8S_VERSION) -p path)" \ OPERATOR_TEMPLATES="$(PWD)/templates" \ - $(GINKGO) --trace --cover --coverpkg=../../pkg/...,../../controllers,../../api/v1beta1 --coverprofile cover.out --covermode=atomic --randomize-all ${PROC_CMD} $(GINKGO_ARGS) ./test/... + $(GINKGO) --trace --cover --coverpkg=../../pkg/...,../../controllers,../../apis/nova/v1beta1 --coverprofile cover.out --covermode=atomic --randomize-all ${PROC_CMD} $(GINKGO_ARGS) ./test/... 
##@ Build @@ -333,12 +333,12 @@ get-ci-tools: # Run go fmt against code gofmt: get-ci-tools $(CI_TOOLS_REPO_DIR)/test-runner/gofmt.sh - $(CI_TOOLS_REPO_DIR)/test-runner/gofmt.sh ./api + $(CI_TOOLS_REPO_DIR)/test-runner/gofmt.sh ./apis # Run go vet against code govet: get-ci-tools GOWORK=off $(CI_TOOLS_REPO_DIR)/test-runner/govet.sh - GOWORK=off $(CI_TOOLS_REPO_DIR)/test-runner/govet.sh ./api + GOWORK=off $(CI_TOOLS_REPO_DIR)/test-runner/govet.sh ./apis # Run go test against code gotest: test @@ -346,23 +346,23 @@ gotest: test # Run golangci-lint test against code golangci: get-ci-tools GOWORK=off $(CI_TOOLS_REPO_DIR)/test-runner/golangci.sh - GOWORK=off $(CI_TOOLS_REPO_DIR)/test-runner/golangci.sh ./api + GOWORK=off $(CI_TOOLS_REPO_DIR)/test-runner/golangci.sh ./apis # Run go lint against code golint: get-ci-tools export GOWORK=off && PATH=$(GOBIN):$(PATH); $(CI_TOOLS_REPO_DIR)/test-runner/golint.sh - export GOWORK=off && PATH=$(GOBIN):$(PATH); $(CI_TOOLS_REPO_DIR)/test-runner/golint.sh ./api + export GOWORK=off && PATH=$(GOBIN):$(PATH); $(CI_TOOLS_REPO_DIR)/test-runner/golint.sh ./apis .PHONY: operator-lint operator-lint: $(LOCALBIN) gowork ## Runs operator-lint GOBIN=$(LOCALBIN) go install github.com/gibizer/operator-lint@v0.5.0 - go vet -vettool=$(LOCALBIN)/operator-lint ./... ./api/... + go vet -vettool=$(LOCALBIN)/operator-lint ./... ./apis/... .PHONY: gowork gowork: ## Generate go.work file test -f go.work || GOTOOLCHAIN=$(GOTOOLCHAIN_VERSION) go work init go work use . 
- go work use ./api + go work use ./apis go work sync OPERATOR_NAMESPACE ?= openstack-operators @@ -431,8 +431,8 @@ force-bump: ## Force bump operator and lib-common dependencies for dep in $$(cat go.mod | grep openstack-k8s-operators | grep -vE -- 'indirect|nova-operator|^replace' | awk '{print $$1}'); do \ go get $$dep@$(BRANCH) ; \ done - for dep in $$(cat api/go.mod | grep openstack-k8s-operators | grep -vE -- 'indirect|nova-operator|^replace' | awk '{print $$1}'); do \ - cd ./api && go get $$dep@$(BRANCH) && cd .. ; \ + for dep in $$(cat apis/go.mod | grep openstack-k8s-operators | grep -vE -- 'indirect|nova-operator|^replace' | awk '{print $$1}'); do \ + cd ./apis && go get $$dep@$(BRANCH) && cd .. ; \ done PHONY: crd-schema-check diff --git a/PROJECT b/PROJECT index 89c2c00a5..e09f55324 100644 --- a/PROJECT +++ b/PROJECT @@ -1,6 +1,11 @@ +# Code generated by tool. DO NOT EDIT. +# This file is used to track the info used to scaffold your project +# and allow the plugins properly work. 
+# More info: https://book.kubebuilder.io/reference/project-config.html domain: openstack.org layout: - go.kubebuilder.io/v3 +multigroup: true plugins: manifests.sdk.operatorframework.io/v2: {} scorecard.sdk.operatorframework.io/v2: {} @@ -14,7 +19,7 @@ resources: domain: openstack.org group: nova kind: NovaAPI - path: github.com/openstack-k8s-operators/nova-operator/api/v1beta1 + path: github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1 version: v1beta1 webhooks: defaulting: true @@ -27,7 +32,7 @@ resources: domain: openstack.org group: nova kind: NovaScheduler - path: github.com/openstack-k8s-operators/nova-operator/api/v1beta1 + path: github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1 version: v1beta1 webhooks: defaulting: true @@ -40,7 +45,7 @@ resources: domain: openstack.org group: nova kind: NovaConductor - path: github.com/openstack-k8s-operators/nova-operator/api/v1beta1 + path: github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1 version: v1beta1 webhooks: defaulting: true @@ -53,7 +58,7 @@ resources: domain: openstack.org group: nova kind: NovaMetadata - path: github.com/openstack-k8s-operators/nova-operator/api/v1beta1 + path: github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1 version: v1beta1 webhooks: defaulting: true @@ -66,7 +71,7 @@ resources: domain: openstack.org group: nova kind: NovaNoVNCProxy - path: github.com/openstack-k8s-operators/nova-operator/api/v1beta1 + path: github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1 version: v1beta1 webhooks: defaulting: true @@ -79,7 +84,7 @@ resources: domain: openstack.org group: nova kind: NovaCell - path: github.com/openstack-k8s-operators/nova-operator/api/v1beta1 + path: github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1 version: v1beta1 webhooks: defaulting: true @@ -92,7 +97,7 @@ resources: domain: openstack.org group: nova kind: Nova - path: github.com/openstack-k8s-operators/nova-operator/api/v1beta1 + 
path: github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1 version: v1beta1 webhooks: defaulting: true @@ -105,7 +110,7 @@ resources: domain: openstack.org group: nova kind: NovaCompute - path: github.com/openstack-k8s-operators/nova-operator/api/v1beta1 + path: github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1 version: v1beta1 webhooks: defaulting: true diff --git a/api/bases/nova.openstack.org_nova.yaml b/apis/bases/nova.openstack.org_nova.yaml similarity index 100% rename from api/bases/nova.openstack.org_nova.yaml rename to apis/bases/nova.openstack.org_nova.yaml diff --git a/api/bases/nova.openstack.org_novaapis.yaml b/apis/bases/nova.openstack.org_novaapis.yaml similarity index 100% rename from api/bases/nova.openstack.org_novaapis.yaml rename to apis/bases/nova.openstack.org_novaapis.yaml diff --git a/api/bases/nova.openstack.org_novacells.yaml b/apis/bases/nova.openstack.org_novacells.yaml similarity index 100% rename from api/bases/nova.openstack.org_novacells.yaml rename to apis/bases/nova.openstack.org_novacells.yaml diff --git a/api/bases/nova.openstack.org_novacomputes.yaml b/apis/bases/nova.openstack.org_novacomputes.yaml similarity index 100% rename from api/bases/nova.openstack.org_novacomputes.yaml rename to apis/bases/nova.openstack.org_novacomputes.yaml diff --git a/api/bases/nova.openstack.org_novaconductors.yaml b/apis/bases/nova.openstack.org_novaconductors.yaml similarity index 100% rename from api/bases/nova.openstack.org_novaconductors.yaml rename to apis/bases/nova.openstack.org_novaconductors.yaml diff --git a/api/bases/nova.openstack.org_novametadata.yaml b/apis/bases/nova.openstack.org_novametadata.yaml similarity index 100% rename from api/bases/nova.openstack.org_novametadata.yaml rename to apis/bases/nova.openstack.org_novametadata.yaml diff --git a/api/bases/nova.openstack.org_novanovncproxies.yaml b/apis/bases/nova.openstack.org_novanovncproxies.yaml similarity index 100% rename from 
api/bases/nova.openstack.org_novanovncproxies.yaml rename to apis/bases/nova.openstack.org_novanovncproxies.yaml diff --git a/api/bases/nova.openstack.org_novaschedulers.yaml b/apis/bases/nova.openstack.org_novaschedulers.yaml similarity index 100% rename from api/bases/nova.openstack.org_novaschedulers.yaml rename to apis/bases/nova.openstack.org_novaschedulers.yaml diff --git a/api/go.mod b/apis/go.mod similarity index 98% rename from api/go.mod rename to apis/go.mod index f04888d39..dabf6468e 100644 --- a/api/go.mod +++ b/apis/go.mod @@ -1,4 +1,4 @@ -module github.com/openstack-k8s-operators/nova-operator/api +module github.com/openstack-k8s-operators/nova-operator/apis go 1.21 diff --git a/api/go.sum b/apis/go.sum similarity index 100% rename from api/go.sum rename to apis/go.sum diff --git a/api/v1beta1/common_types.go b/apis/nova/v1beta1/common_types.go similarity index 100% rename from api/v1beta1/common_types.go rename to apis/nova/v1beta1/common_types.go diff --git a/api/v1beta1/common_webhook.go b/apis/nova/v1beta1/common_webhook.go similarity index 100% rename from api/v1beta1/common_webhook.go rename to apis/nova/v1beta1/common_webhook.go diff --git a/api/v1beta1/conditions.go b/apis/nova/v1beta1/conditions.go similarity index 100% rename from api/v1beta1/conditions.go rename to apis/nova/v1beta1/conditions.go diff --git a/api/v1beta1/groupversion_info.go b/apis/nova/v1beta1/groupversion_info.go similarity index 100% rename from api/v1beta1/groupversion_info.go rename to apis/nova/v1beta1/groupversion_info.go diff --git a/api/v1beta1/nova_types.go b/apis/nova/v1beta1/nova_types.go similarity index 100% rename from api/v1beta1/nova_types.go rename to apis/nova/v1beta1/nova_types.go diff --git a/api/v1beta1/nova_webhook.go b/apis/nova/v1beta1/nova_webhook.go similarity index 100% rename from api/v1beta1/nova_webhook.go rename to apis/nova/v1beta1/nova_webhook.go diff --git a/api/v1beta1/novaapi_types.go b/apis/nova/v1beta1/novaapi_types.go similarity 
index 100% rename from api/v1beta1/novaapi_types.go rename to apis/nova/v1beta1/novaapi_types.go diff --git a/api/v1beta1/novaapi_webhook.go b/apis/nova/v1beta1/novaapi_webhook.go similarity index 100% rename from api/v1beta1/novaapi_webhook.go rename to apis/nova/v1beta1/novaapi_webhook.go diff --git a/api/v1beta1/novacell_types.go b/apis/nova/v1beta1/novacell_types.go similarity index 100% rename from api/v1beta1/novacell_types.go rename to apis/nova/v1beta1/novacell_types.go diff --git a/api/v1beta1/novacell_webhook.go b/apis/nova/v1beta1/novacell_webhook.go similarity index 100% rename from api/v1beta1/novacell_webhook.go rename to apis/nova/v1beta1/novacell_webhook.go diff --git a/api/v1beta1/novacompute_types.go b/apis/nova/v1beta1/novacompute_types.go similarity index 100% rename from api/v1beta1/novacompute_types.go rename to apis/nova/v1beta1/novacompute_types.go diff --git a/api/v1beta1/novacompute_webhook.go b/apis/nova/v1beta1/novacompute_webhook.go similarity index 100% rename from api/v1beta1/novacompute_webhook.go rename to apis/nova/v1beta1/novacompute_webhook.go diff --git a/api/v1beta1/novaconductor_types.go b/apis/nova/v1beta1/novaconductor_types.go similarity index 100% rename from api/v1beta1/novaconductor_types.go rename to apis/nova/v1beta1/novaconductor_types.go diff --git a/api/v1beta1/novaconductor_webhook.go b/apis/nova/v1beta1/novaconductor_webhook.go similarity index 100% rename from api/v1beta1/novaconductor_webhook.go rename to apis/nova/v1beta1/novaconductor_webhook.go diff --git a/api/v1beta1/novametadata_types.go b/apis/nova/v1beta1/novametadata_types.go similarity index 100% rename from api/v1beta1/novametadata_types.go rename to apis/nova/v1beta1/novametadata_types.go diff --git a/api/v1beta1/novametadata_webhook.go b/apis/nova/v1beta1/novametadata_webhook.go similarity index 100% rename from api/v1beta1/novametadata_webhook.go rename to apis/nova/v1beta1/novametadata_webhook.go diff --git a/api/v1beta1/novanovncproxy_types.go 
b/apis/nova/v1beta1/novanovncproxy_types.go similarity index 100% rename from api/v1beta1/novanovncproxy_types.go rename to apis/nova/v1beta1/novanovncproxy_types.go diff --git a/api/v1beta1/novanovncproxy_webhook.go b/apis/nova/v1beta1/novanovncproxy_webhook.go similarity index 100% rename from api/v1beta1/novanovncproxy_webhook.go rename to apis/nova/v1beta1/novanovncproxy_webhook.go diff --git a/api/v1beta1/novascheduler_types.go b/apis/nova/v1beta1/novascheduler_types.go similarity index 100% rename from api/v1beta1/novascheduler_types.go rename to apis/nova/v1beta1/novascheduler_types.go diff --git a/api/v1beta1/novascheduler_webhook.go b/apis/nova/v1beta1/novascheduler_webhook.go similarity index 100% rename from api/v1beta1/novascheduler_webhook.go rename to apis/nova/v1beta1/novascheduler_webhook.go diff --git a/api/v1beta1/zz_generated.deepcopy.go b/apis/nova/v1beta1/zz_generated.deepcopy.go similarity index 100% rename from api/v1beta1/zz_generated.deepcopy.go rename to apis/nova/v1beta1/zz_generated.deepcopy.go diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index 3541ef6b4..fac92d16a 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -15,24 +15,24 @@ resources: patches: # [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. 
# patches here are for enabling the conversion webhook for each CRD -#- path: patches/webhook_in_novaapis.yaml -#- path: patches/webhook_in_novaschedulers.yaml -#- path: patches/webhook_in_novaconductors.yaml -#- path: patches/webhook_in_novametadata.yaml -#- path: patches/webhook_in_novanovncproxies.yaml -#- path: patches/webhook_in_novacells.yaml -#- path: patches/webhook_in_nova.yaml +#- path: patches/webhook_in_nova_novaapis.yaml +#- path: patches/webhook_in_nova_novaschedulers.yaml +#- path: patches/webhook_in_nova_novaconductors.yaml +#- path: patches/webhook_in_nova_novametadata.yaml +#- path: patches/webhook_in_nova_novanovncproxies.yaml +#- path: patches/webhook_in_nova_novacells.yaml +#- path: patches/webhook_in_nova_nova.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD -#- path: patches/cainjection_in_novaapis.yaml -#- path: patches/cainjection_in_novaschedulers.yaml -#- path: patches/cainjection_in_novaconductors.yaml -#- path: patches/cainjection_in_novametadata.yaml -#- path: patches/cainjection_in_novanovncproxies.yaml -#- path: patches/cainjection_in_novacells.yaml -#- path: patches/cainjection_in_nova.yaml +#- path: patches/cainjection_in_nova_novaapis.yaml +#- path: patches/cainjection_in_nova_novaschedulers.yaml +#- path: patches/cainjection_in_nova_novaconductors.yaml +#- path: patches/cainjection_in_nova_novametadata.yaml +#- path: patches/cainjection_in_nova_novanovncproxies.yaml +#- path: patches/cainjection_in_nova_novacells.yaml +#- path: patches/cainjection_in_nova_nova.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
diff --git a/config/crd/patches/cainjection_in_nova.yaml b/config/crd/patches/cainjection_in_nova_nova.yaml similarity index 100% rename from config/crd/patches/cainjection_in_nova.yaml rename to config/crd/patches/cainjection_in_nova_nova.yaml diff --git a/config/crd/patches/cainjection_in_novaapis.yaml b/config/crd/patches/cainjection_in_nova_novaapis.yaml similarity index 100% rename from config/crd/patches/cainjection_in_novaapis.yaml rename to config/crd/patches/cainjection_in_nova_novaapis.yaml diff --git a/config/crd/patches/cainjection_in_novacells.yaml b/config/crd/patches/cainjection_in_nova_novacells.yaml similarity index 100% rename from config/crd/patches/cainjection_in_novacells.yaml rename to config/crd/patches/cainjection_in_nova_novacells.yaml diff --git a/config/crd/patches/cainjection_in_novacomputes.yaml b/config/crd/patches/cainjection_in_nova_novacomputes.yaml similarity index 100% rename from config/crd/patches/cainjection_in_novacomputes.yaml rename to config/crd/patches/cainjection_in_nova_novacomputes.yaml diff --git a/config/crd/patches/cainjection_in_novaconductors.yaml b/config/crd/patches/cainjection_in_nova_novaconductors.yaml similarity index 100% rename from config/crd/patches/cainjection_in_novaconductors.yaml rename to config/crd/patches/cainjection_in_nova_novaconductors.yaml diff --git a/config/crd/patches/cainjection_in_novametadata.yaml b/config/crd/patches/cainjection_in_nova_novametadata.yaml similarity index 100% rename from config/crd/patches/cainjection_in_novametadata.yaml rename to config/crd/patches/cainjection_in_nova_novametadata.yaml diff --git a/config/crd/patches/cainjection_in_novanovncproxies.yaml b/config/crd/patches/cainjection_in_nova_novanovncproxies.yaml similarity index 100% rename from config/crd/patches/cainjection_in_novanovncproxies.yaml rename to config/crd/patches/cainjection_in_nova_novanovncproxies.yaml diff --git a/config/crd/patches/cainjection_in_novaschedulers.yaml 
b/config/crd/patches/cainjection_in_nova_novaschedulers.yaml similarity index 100% rename from config/crd/patches/cainjection_in_novaschedulers.yaml rename to config/crd/patches/cainjection_in_nova_novaschedulers.yaml diff --git a/config/crd/patches/webhook_in_nova.yaml b/config/crd/patches/webhook_in_nova_nova.yaml similarity index 100% rename from config/crd/patches/webhook_in_nova.yaml rename to config/crd/patches/webhook_in_nova_nova.yaml diff --git a/config/crd/patches/webhook_in_novaapis.yaml b/config/crd/patches/webhook_in_nova_novaapis.yaml similarity index 100% rename from config/crd/patches/webhook_in_novaapis.yaml rename to config/crd/patches/webhook_in_nova_novaapis.yaml diff --git a/config/crd/patches/webhook_in_novacells.yaml b/config/crd/patches/webhook_in_nova_novacells.yaml similarity index 100% rename from config/crd/patches/webhook_in_novacells.yaml rename to config/crd/patches/webhook_in_nova_novacells.yaml diff --git a/config/crd/patches/webhook_in_novacomputes.yaml b/config/crd/patches/webhook_in_nova_novacomputes.yaml similarity index 100% rename from config/crd/patches/webhook_in_novacomputes.yaml rename to config/crd/patches/webhook_in_nova_novacomputes.yaml diff --git a/config/crd/patches/webhook_in_novaconductors.yaml b/config/crd/patches/webhook_in_nova_novaconductors.yaml similarity index 100% rename from config/crd/patches/webhook_in_novaconductors.yaml rename to config/crd/patches/webhook_in_nova_novaconductors.yaml diff --git a/config/crd/patches/webhook_in_novametadata.yaml b/config/crd/patches/webhook_in_nova_novametadata.yaml similarity index 100% rename from config/crd/patches/webhook_in_novametadata.yaml rename to config/crd/patches/webhook_in_nova_novametadata.yaml diff --git a/config/crd/patches/webhook_in_novanovncproxies.yaml b/config/crd/patches/webhook_in_nova_novanovncproxies.yaml similarity index 100% rename from config/crd/patches/webhook_in_novanovncproxies.yaml rename to 
config/crd/patches/webhook_in_nova_novanovncproxies.yaml diff --git a/config/crd/patches/webhook_in_novaschedulers.yaml b/config/crd/patches/webhook_in_nova_novaschedulers.yaml similarity index 100% rename from config/crd/patches/webhook_in_novaschedulers.yaml rename to config/crd/patches/webhook_in_nova_novaschedulers.yaml diff --git a/controllers/common.go b/controllers/nova/common.go similarity index 99% rename from controllers/common.go rename to controllers/nova/common.go index b8998c63d..7672ccbf7 100644 --- a/controllers/common.go +++ b/controllers/nova/common.go @@ -37,7 +37,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/log" memcachedv1 "github.com/openstack-k8s-operators/infra-operator/apis/memcached/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" gophercloud "github.com/gophercloud/gophercloud" diff --git a/controllers/nova_controller.go b/controllers/nova/nova_controller.go similarity index 99% rename from controllers/nova_controller.go rename to controllers/nova/nova_controller.go index a37375b42..9cf4c3fd9 100644 --- a/controllers/nova_controller.go +++ b/controllers/nova/nova_controller.go @@ -53,7 +53,7 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/tls" util "github.com/openstack-k8s-operators/lib-common/modules/common/util" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" "github.com/openstack-k8s-operators/nova-operator/pkg/novaapi" diff --git a/controllers/novaapi_controller.go b/controllers/nova/novaapi_controller.go similarity index 99% rename from controllers/novaapi_controller.go rename to controllers/nova/novaapi_controller.go index 731c46e13..465596a5a 100644 --- 
a/controllers/novaapi_controller.go +++ b/controllers/nova/novaapi_controller.go @@ -52,7 +52,7 @@ import ( topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" keystonev1 "github.com/openstack-k8s-operators/keystone-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" "github.com/openstack-k8s-operators/nova-operator/pkg/novaapi" diff --git a/controllers/novacell_controller.go b/controllers/nova/novacell_controller.go similarity index 99% rename from controllers/novacell_controller.go rename to controllers/nova/novacell_controller.go index 08f5625ef..41eb8334c 100644 --- a/controllers/novacell_controller.go +++ b/controllers/nova/novacell_controller.go @@ -46,7 +46,7 @@ import ( util "github.com/openstack-k8s-operators/lib-common/modules/common/util" topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" ) // NovaCellReconciler reconciles a NovaCell object diff --git a/controllers/novacompute_controller.go b/controllers/nova/novacompute_controller.go similarity index 99% rename from controllers/novacompute_controller.go rename to controllers/nova/novacompute_controller.go index a21e25d1b..8eafd811a 100644 --- a/controllers/novacompute_controller.go +++ b/controllers/nova/novacompute_controller.go @@ -46,7 +46,7 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/tls" util "github.com/openstack-k8s-operators/lib-common/modules/common/util" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" 
"github.com/openstack-k8s-operators/nova-operator/pkg/novacompute" diff --git a/controllers/novaconductor_controller.go b/controllers/nova/novaconductor_controller.go similarity index 99% rename from controllers/novaconductor_controller.go rename to controllers/nova/novaconductor_controller.go index f0bf4fa97..393298e21 100644 --- a/controllers/novaconductor_controller.go +++ b/controllers/nova/novaconductor_controller.go @@ -53,7 +53,7 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/tls" util "github.com/openstack-k8s-operators/lib-common/modules/common/util" mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/novaconductor" ) diff --git a/controllers/novametadata_controller.go b/controllers/nova/novametadata_controller.go similarity index 99% rename from controllers/novametadata_controller.go rename to controllers/nova/novametadata_controller.go index b58ed7747..20481539a 100644 --- a/controllers/novametadata_controller.go +++ b/controllers/nova/novametadata_controller.go @@ -52,7 +52,7 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/tls" util "github.com/openstack-k8s-operators/lib-common/modules/common/util" mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" "github.com/openstack-k8s-operators/nova-operator/pkg/novametadata" k8s_errors "k8s.io/apimachinery/pkg/api/errors" diff --git a/controllers/novanovncproxy_controller.go b/controllers/nova/novanovncproxy_controller.go similarity index 99% rename from controllers/novanovncproxy_controller.go rename to 
controllers/nova/novanovncproxy_controller.go index 8f76a78c0..68f90438e 100644 --- a/controllers/novanovncproxy_controller.go +++ b/controllers/nova/novanovncproxy_controller.go @@ -50,7 +50,7 @@ import ( "github.com/openstack-k8s-operators/lib-common/modules/common/tls" util "github.com/openstack-k8s-operators/lib-common/modules/common/util" mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" "github.com/openstack-k8s-operators/nova-operator/pkg/novncproxy" k8s_errors "k8s.io/apimachinery/pkg/api/errors" diff --git a/controllers/novascheduler_controller.go b/controllers/nova/novascheduler_controller.go similarity index 99% rename from controllers/novascheduler_controller.go rename to controllers/nova/novascheduler_controller.go index 6dbcdda0f..6ba700ba0 100644 --- a/controllers/novascheduler_controller.go +++ b/controllers/nova/novascheduler_controller.go @@ -51,7 +51,7 @@ import ( mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" "github.com/openstack-k8s-operators/nova-operator/pkg/novascheduler" ) diff --git a/go.mod b/go.mod index 01f144c56..1ddfc71bc 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/openstack-k8s-operators/lib-common/modules/openstack v0.6.1-0.20250730071847-837b07f8d72f github.com/openstack-k8s-operators/lib-common/modules/test v0.6.1-0.20250730071847-837b07f8d72f github.com/openstack-k8s-operators/mariadb-operator/api v0.6.1-0.20250811132527-8b60a24b4cd5 - 
github.com/openstack-k8s-operators/nova-operator/api v0.0.0-20221209164002-f9e6b9363961 + github.com/openstack-k8s-operators/nova-operator/apis v0.0.0-00010101000000-000000000000 go.uber.org/zap v1.27.0 golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 gopkg.in/yaml.v3 v3.0.1 @@ -85,7 +85,7 @@ require ( sigs.k8s.io/yaml v1.4.0 // indirect ) -replace github.com/openstack-k8s-operators/nova-operator/api => ./api +replace github.com/openstack-k8s-operators/nova-operator/apis => ./apis // mschuppert: map to latest commit from release-4.16 tag // must consistent within modules and service operators diff --git a/main.go b/main.go index e8f263661..dc6b8fc8c 100644 --- a/main.go +++ b/main.go @@ -51,9 +51,9 @@ import ( mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" - "github.com/openstack-k8s-operators/nova-operator/controllers" + nova_ctrl "github.com/openstack-k8s-operators/nova-operator/controllers/nova" //+kubebuilder:scaffold:imports ) @@ -145,7 +145,7 @@ func main() { os.Exit(1) } - reconcilers := controllers.NewReconcilers(mgr, kclient) + reconcilers := nova_ctrl.NewReconcilers(mgr, kclient) err = reconcilers.Setup(mgr, setupLog) if err != nil { os.Exit(1) diff --git a/pkg/nova/celldelete.go b/pkg/nova/celldelete.go index c6033ee23..e2d2b6548 100644 --- a/pkg/nova/celldelete.go +++ b/pkg/nova/celldelete.go @@ -7,7 +7,7 @@ import ( "k8s.io/utils/ptr" "github.com/openstack-k8s-operators/lib-common/modules/common/env" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" ) func CellDeleteJob( diff --git a/pkg/nova/cellmapping.go b/pkg/nova/cellmapping.go index e5349e401..c88432320 100644 --- 
a/pkg/nova/cellmapping.go +++ b/pkg/nova/cellmapping.go @@ -7,7 +7,7 @@ import ( "k8s.io/utils/ptr" "github.com/openstack-k8s-operators/lib-common/modules/common/env" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" ) func CellMappingJob( diff --git a/pkg/nova/host_discover.go b/pkg/nova/host_discover.go index 471316a24..464aec5a4 100644 --- a/pkg/nova/host_discover.go +++ b/pkg/nova/host_discover.go @@ -16,7 +16,7 @@ package nova import ( env "github.com/openstack-k8s-operators/lib-common/modules/common/env" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" diff --git a/pkg/novaapi/deployment.go b/pkg/novaapi/deployment.go index 839afff25..9aa3c5ca3 100644 --- a/pkg/novaapi/deployment.go +++ b/pkg/novaapi/deployment.go @@ -24,7 +24,7 @@ import ( env "github.com/openstack-k8s-operators/lib-common/modules/common/env" "github.com/openstack-k8s-operators/lib-common/modules/common/service" "github.com/openstack-k8s-operators/lib-common/modules/common/tls" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" appsv1 "k8s.io/api/apps/v1" diff --git a/pkg/novacompute/deployment.go b/pkg/novacompute/deployment.go index e1abc922d..d076d1db2 100644 --- a/pkg/novacompute/deployment.go +++ b/pkg/novacompute/deployment.go @@ -20,7 +20,7 @@ import ( common "github.com/openstack-k8s-operators/lib-common/modules/common" affinity "github.com/openstack-k8s-operators/lib-common/modules/common/affinity" env "github.com/openstack-k8s-operators/lib-common/modules/common/env" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 
"github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" diff --git a/pkg/novaconductor/dbpurge.go b/pkg/novaconductor/dbpurge.go index 2c27bae83..c5d8a7c82 100644 --- a/pkg/novaconductor/dbpurge.go +++ b/pkg/novaconductor/dbpurge.go @@ -11,7 +11,7 @@ import ( memcachedv1 "github.com/openstack-k8s-operators/infra-operator/apis/memcached/v1beta1" "github.com/openstack-k8s-operators/lib-common/modules/common/env" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" ) diff --git a/pkg/novaconductor/dbsync.go b/pkg/novaconductor/dbsync.go index ce8b31939..37d36084c 100644 --- a/pkg/novaconductor/dbsync.go +++ b/pkg/novaconductor/dbsync.go @@ -17,7 +17,7 @@ limitations under the License. 
package novaconductor import ( - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" env "github.com/openstack-k8s-operators/lib-common/modules/common/env" diff --git a/pkg/novaconductor/deployment.go b/pkg/novaconductor/deployment.go index 4cc73351b..e12d8d87d 100644 --- a/pkg/novaconductor/deployment.go +++ b/pkg/novaconductor/deployment.go @@ -22,7 +22,7 @@ import ( common "github.com/openstack-k8s-operators/lib-common/modules/common" affinity "github.com/openstack-k8s-operators/lib-common/modules/common/affinity" env "github.com/openstack-k8s-operators/lib-common/modules/common/env" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" appsv1 "k8s.io/api/apps/v1" diff --git a/pkg/novametadata/deployment.go b/pkg/novametadata/deployment.go index 795c01dcb..3b9b0c863 100644 --- a/pkg/novametadata/deployment.go +++ b/pkg/novametadata/deployment.go @@ -22,7 +22,7 @@ import ( common "github.com/openstack-k8s-operators/lib-common/modules/common" affinity "github.com/openstack-k8s-operators/lib-common/modules/common/affinity" env "github.com/openstack-k8s-operators/lib-common/modules/common/env" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" appsv1 "k8s.io/api/apps/v1" diff --git a/pkg/novascheduler/deployment.go b/pkg/novascheduler/deployment.go index ee0228a4c..bf3a1433a 100644 --- a/pkg/novascheduler/deployment.go +++ b/pkg/novascheduler/deployment.go @@ -20,7 +20,7 @@ import ( common "github.com/openstack-k8s-operators/lib-common/modules/common" affinity 
"github.com/openstack-k8s-operators/lib-common/modules/common/affinity" env "github.com/openstack-k8s-operators/lib-common/modules/common/env" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" memcachedv1 "github.com/openstack-k8s-operators/infra-operator/apis/memcached/v1beta1" diff --git a/pkg/novncproxy/deployment.go b/pkg/novncproxy/deployment.go index c7c9b7613..15b94805e 100644 --- a/pkg/novncproxy/deployment.go +++ b/pkg/novncproxy/deployment.go @@ -22,7 +22,7 @@ import ( common "github.com/openstack-k8s-operators/lib-common/modules/common" affinity "github.com/openstack-k8s-operators/lib-common/modules/common/affinity" env "github.com/openstack-k8s-operators/lib-common/modules/common/env" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" "github.com/openstack-k8s-operators/nova-operator/pkg/nova" appsv1 "k8s.io/api/apps/v1" diff --git a/test/functional/api_fixture.go b/test/functional/nova/api_fixture.go similarity index 100% rename from test/functional/api_fixture.go rename to test/functional/nova/api_fixture.go diff --git a/test/functional/base_test.go b/test/functional/nova/base_test.go similarity index 99% rename from test/functional/base_test.go rename to test/functional/nova/base_test.go index f64e37b44..46c0ecbec 100644 --- a/test/functional/base_test.go +++ b/test/functional/nova/base_test.go @@ -36,7 +36,7 @@ import ( keystonev1 "github.com/openstack-k8s-operators/keystone-operator/api/v1beta1" condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) diff --git 
a/test/functional/nova_compute_ironic_controller_test.go b/test/functional/nova/nova_compute_ironic_controller_test.go similarity index 99% rename from test/functional/nova_compute_ironic_controller_test.go rename to test/functional/nova/nova_compute_ironic_controller_test.go index f325f34e6..0a229cd4e 100644 --- a/test/functional/nova_compute_ironic_controller_test.go +++ b/test/functional/nova/nova_compute_ironic_controller_test.go @@ -32,7 +32,7 @@ import ( condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/util" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" ) var _ = Describe("NovaCompute controller", func() { diff --git a/test/functional/nova_controller_test.go b/test/functional/nova/nova_controller_test.go similarity index 99% rename from test/functional/nova_controller_test.go rename to test/functional/nova/nova_controller_test.go index 0885d82d1..22bfe8be4 100644 --- a/test/functional/nova_controller_test.go +++ b/test/functional/nova/nova_controller_test.go @@ -34,8 +34,8 @@ import ( condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/util" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" - "github.com/openstack-k8s-operators/nova-operator/controllers" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" + nova_ctrl "github.com/openstack-k8s-operators/nova-operator/controllers/nova" ) var _ = Describe("Nova controller - notifications", func() { @@ -360,7 +360,7 @@ var _ = Describe("Nova controller", func() { internalCellSecret := th.GetSecret(cell0.InternalCellSecretName) Expect(internalCellSecret.Data).To(HaveLen(3)) Expect(internalCellSecret.Data).To( - HaveKeyWithValue(controllers.ServicePasswordSelector, 
[]byte("service-password"))) + HaveKeyWithValue(nova_ctrl.ServicePasswordSelector, []byte("service-password"))) Expect(internalCellSecret.Data).To( HaveKeyWithValue("transport_url", []byte("rabbit://cell0/fake"))) Expect(internalCellSecret.Data).To( @@ -476,9 +476,9 @@ var _ = Describe("Nova controller", func() { internalTopLevelSecret := th.GetSecret(novaNames.InternalTopLevelSecretName) Expect(internalTopLevelSecret.Data).To(HaveLen(4)) Expect(internalTopLevelSecret.Data).To( - HaveKeyWithValue(controllers.ServicePasswordSelector, []byte("service-password"))) + HaveKeyWithValue(nova_ctrl.ServicePasswordSelector, []byte("service-password"))) Expect(internalTopLevelSecret.Data).To( - HaveKeyWithValue(controllers.MetadataSecretSelector, []byte("metadata-secret"))) + HaveKeyWithValue(nova_ctrl.MetadataSecretSelector, []byte("metadata-secret"))) Expect(internalTopLevelSecret.Data).To( HaveKeyWithValue("transport_url", []byte("rabbit://cell0/fake"))) Expect(internalTopLevelSecret.Data).To( diff --git a/test/functional/nova_metadata_controller_test.go b/test/functional/nova/nova_metadata_controller_test.go similarity index 99% rename from test/functional/nova_metadata_controller_test.go rename to test/functional/nova/nova_metadata_controller_test.go index 7e5dd279a..e0bc5d840 100644 --- a/test/functional/nova_metadata_controller_test.go +++ b/test/functional/nova/nova_metadata_controller_test.go @@ -36,7 +36,7 @@ import ( topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/util" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" ) var _ = Describe("NovaMetadata controller", func() { diff --git a/test/functional/nova_multicell_test.go b/test/functional/nova/nova_multicell_test.go similarity index 
98% rename from test/functional/nova_multicell_test.go rename to test/functional/nova/nova_multicell_test.go index 589d08cdd..03e3cf7aa 100644 --- a/test/functional/nova_multicell_test.go +++ b/test/functional/nova/nova_multicell_test.go @@ -26,8 +26,8 @@ import ( condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" - "github.com/openstack-k8s-operators/nova-operator/controllers" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" + nova_ctrl "github.com/openstack-k8s-operators/nova-operator/controllers/nova" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -923,12 +923,12 @@ var _ = Describe("Nova multi cell", func() { cell1Secret := th.GetSecret(cell1.InternalCellSecretName) Expect(cell1Secret.Data).To( - HaveKeyWithValue(controllers.MetadataSecretSelector, []byte("metadata-secret-cell1"))) + HaveKeyWithValue(nova_ctrl.MetadataSecretSelector, []byte("metadata-secret-cell1"))) cell0Secret := th.GetSecret(cell0.InternalCellSecretName) Expect(cell0Secret.Data).NotTo( - HaveKeyWithValue(controllers.MetadataSecretSelector, []byte("metadata-secret"))) + HaveKeyWithValue(nova_ctrl.MetadataSecretSelector, []byte("metadata-secret"))) Expect(cell0Secret.Data).NotTo( - HaveKeyWithValue(controllers.MetadataSecretSelector, []byte("metadata-secret-cell1"))) + HaveKeyWithValue(nova_ctrl.MetadataSecretSelector, []byte("metadata-secret-cell1"))) configDataMap := th.GetSecret(cell1.MetadataConfigDataName) Expect(configDataMap).ShouldNot(BeNil()) Expect(configDataMap.Data).Should(HaveKey("httpd.conf")) @@ -1011,7 +1011,7 @@ var _ = Describe("Nova multi cell deletion", func() { "cell-3", } - controllers.SortNovaCellListByName(cellList) + nova_ctrl.SortNovaCellListByName(cellList) actualList := []string{} for _, cell := 
range cellList.Items { diff --git a/test/functional/nova_novncproxy_test.go b/test/functional/nova/nova_novncproxy_test.go similarity index 99% rename from test/functional/nova_novncproxy_test.go rename to test/functional/nova/nova_novncproxy_test.go index 4af454621..29371a762 100644 --- a/test/functional/nova_novncproxy_test.go +++ b/test/functional/nova/nova_novncproxy_test.go @@ -29,7 +29,7 @@ import ( condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/util" mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" diff --git a/test/functional/nova_reconfiguration_test.go b/test/functional/nova/nova_reconfiguration_test.go similarity index 99% rename from test/functional/nova_reconfiguration_test.go rename to test/functional/nova/nova_reconfiguration_test.go index 0580102d3..eb6001537 100644 --- a/test/functional/nova_reconfiguration_test.go +++ b/test/functional/nova/nova_reconfiguration_test.go @@ -31,7 +31,7 @@ import ( topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" diff --git a/test/functional/nova_scheduler_test.go b/test/functional/nova/nova_scheduler_test.go similarity index 99% rename from test/functional/nova_scheduler_test.go rename to test/functional/nova/nova_scheduler_test.go index 7dc2ee10a..c46e39a97 100644 --- 
a/test/functional/nova_scheduler_test.go +++ b/test/functional/nova/nova_scheduler_test.go @@ -29,7 +29,7 @@ import ( condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/util" mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" diff --git a/test/functional/novaapi_controller_test.go b/test/functional/nova/novaapi_controller_test.go similarity index 99% rename from test/functional/novaapi_controller_test.go rename to test/functional/nova/novaapi_controller_test.go index bb779474d..e54a4a557 100644 --- a/test/functional/novaapi_controller_test.go +++ b/test/functional/nova/novaapi_controller_test.go @@ -31,7 +31,7 @@ import ( condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/util" mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" diff --git a/test/functional/novacell_controller_test.go b/test/functional/nova/novacell_controller_test.go similarity index 99% rename from test/functional/novacell_controller_test.go rename to test/functional/nova/novacell_controller_test.go index 0e1a18868..98a19bda1 100644 --- a/test/functional/novacell_controller_test.go +++ b/test/functional/nova/novacell_controller_test.go @@ -28,7 +28,7 @@ import ( condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" 
"github.com/openstack-k8s-operators/lib-common/modules/common/service" mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/utils/ptr" diff --git a/test/functional/novaconductor_controller_test.go b/test/functional/nova/novaconductor_controller_test.go similarity index 99% rename from test/functional/novaconductor_controller_test.go rename to test/functional/nova/novaconductor_controller_test.go index 7bd29cb51..50101f160 100644 --- a/test/functional/novaconductor_controller_test.go +++ b/test/functional/nova/novaconductor_controller_test.go @@ -30,7 +30,7 @@ import ( condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" "github.com/openstack-k8s-operators/lib-common/modules/common/util" mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" diff --git a/test/functional/sample_test.go b/test/functional/nova/sample_test.go similarity index 99% rename from test/functional/sample_test.go rename to test/functional/nova/sample_test.go index eff1aa90e..f33358b4e 100644 --- a/test/functional/sample_test.go +++ b/test/functional/nova/sample_test.go @@ -25,7 +25,7 @@ import ( "k8s.io/apimachinery/pkg/types" ) -const SamplesDir = "../../config/samples/" +const SamplesDir = "../../../config/samples/" func ReadSample(sampleFileName string) map[string]interface{} { rawSample := make(map[string]interface{}) diff --git a/test/functional/suite_test.go b/test/functional/nova/suite_test.go similarity index 96% rename from 
test/functional/suite_test.go rename to test/functional/nova/suite_test.go index 15902c1ad..4f379a47d 100644 --- a/test/functional/suite_test.go +++ b/test/functional/nova/suite_test.go @@ -55,8 +55,8 @@ import ( keystonev1 "github.com/openstack-k8s-operators/keystone-operator/api/v1beta1" test "github.com/openstack-k8s-operators/lib-common/modules/test" mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" - novav1 "github.com/openstack-k8s-operators/nova-operator/api/v1beta1" - "github.com/openstack-k8s-operators/nova-operator/controllers" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" + nova_ctrl "github.com/openstack-k8s-operators/nova-operator/controllers/nova" infra_test "github.com/openstack-k8s-operators/infra-operator/apis/test/helpers" topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" @@ -100,7 +100,7 @@ var _ = BeforeSuite(func() { ctx, cancel = context.WithCancel(context.TODO()) - const gomod = "../../go.mod" + const gomod = "../../../go.mod" keystoneCRDs, err := test.GetCRDDirFromModule( "github.com/openstack-k8s-operators/keystone-operator/api", gomod, "bases") @@ -120,7 +120,7 @@ var _ = BeforeSuite(func() { By("bootstrapping test environment") testEnv = &envtest.Environment{ CRDDirectoryPaths: []string{ - filepath.Join("..", "..", "config", "crd", "bases"), + filepath.Join("..", "..", "..", "config", "crd", "bases"), // NOTE(gibi): we need to list all the external CRDs our operator depends on mariadbCRDs, keystoneCRDs, @@ -133,7 +133,7 @@ var _ = BeforeSuite(func() { }, ErrorIfCRDPathMissing: true, WebhookInstallOptions: envtest.WebhookInstallOptions{ - Paths: []string{filepath.Join("..", "..", "config", "webhook")}, + Paths: []string{filepath.Join("..", "..", "..", "config", "webhook")}, // NOTE(gibi): if localhost is resolved to ::1 (ipv6) then starting // the webhook fails as it try to parse the address as ipv4 and // failing on the colons in ::1 @@ -210,7 
+210,7 @@ var _ = BeforeSuite(func() { kclient, err := kubernetes.NewForConfig(cfg) Expect(err).ToNot(HaveOccurred(), "failed to create kclient") - reconcilers := controllers.NewReconcilers(k8sManager, kclient) + reconcilers := nova_ctrl.NewReconcilers(k8sManager, kclient) // NOTE(gibi): During envtest we simulate success of tasks (e.g Job, // Deployment, DB) so we can speed up the test execution by reducing the // time we wait before we reconcile when a task is running. diff --git a/test/functional/validation_webhook_test.go b/test/functional/nova/validation_webhook_test.go similarity index 100% rename from test/functional/validation_webhook_test.go rename to test/functional/nova/validation_webhook_test.go diff --git a/test/kuttl/test-suites/default/cell-tests/00-cleanup-nova.yaml b/test/kuttl/nova/test-suites/default/cell-tests/00-cleanup-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/cell-tests/00-cleanup-nova.yaml rename to test/kuttl/nova/test-suites/default/cell-tests/00-cleanup-nova.yaml diff --git a/test/kuttl/test-suites/default/cell-tests/01-assert.yaml b/test/kuttl/nova/test-suites/default/cell-tests/01-assert.yaml similarity index 100% rename from test/kuttl/test-suites/default/cell-tests/01-assert.yaml rename to test/kuttl/nova/test-suites/default/cell-tests/01-assert.yaml diff --git a/test/kuttl/test-suites/default/cell-tests/01-deploy.yaml b/test/kuttl/nova/test-suites/default/cell-tests/01-deploy.yaml similarity index 100% rename from test/kuttl/test-suites/default/cell-tests/01-deploy.yaml rename to test/kuttl/nova/test-suites/default/cell-tests/01-deploy.yaml diff --git a/test/kuttl/test-suites/default/cell-tests/02-assert.yaml b/test/kuttl/nova/test-suites/default/cell-tests/02-assert.yaml similarity index 100% rename from test/kuttl/test-suites/default/cell-tests/02-assert.yaml rename to test/kuttl/nova/test-suites/default/cell-tests/02-assert.yaml diff --git 
a/test/kuttl/test-suites/default/cell-tests/02-delete-cell-nova.yaml b/test/kuttl/nova/test-suites/default/cell-tests/02-delete-cell-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/cell-tests/02-delete-cell-nova.yaml rename to test/kuttl/nova/test-suites/default/cell-tests/02-delete-cell-nova.yaml diff --git a/test/kuttl/test-suites/default/cell-tests/03-cleanup-nova.yaml b/test/kuttl/nova/test-suites/default/cell-tests/03-cleanup-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/cell-tests/03-cleanup-nova.yaml rename to test/kuttl/nova/test-suites/default/cell-tests/03-cleanup-nova.yaml diff --git a/test/kuttl/test-suites/default/common/cleanup-nova.yaml b/test/kuttl/nova/test-suites/default/common/cleanup-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/common/cleanup-nova.yaml rename to test/kuttl/nova/test-suites/default/common/cleanup-nova.yaml diff --git a/test/kuttl/test-suites/default/config-tests/00-cleanup-nova.yaml b/test/kuttl/nova/test-suites/default/config-tests/00-cleanup-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/config-tests/00-cleanup-nova.yaml rename to test/kuttl/nova/test-suites/default/config-tests/00-cleanup-nova.yaml diff --git a/test/kuttl/test-suites/default/config-tests/01-assert.yaml b/test/kuttl/nova/test-suites/default/config-tests/01-assert.yaml similarity index 100% rename from test/kuttl/test-suites/default/config-tests/01-assert.yaml rename to test/kuttl/nova/test-suites/default/config-tests/01-assert.yaml diff --git a/test/kuttl/test-suites/default/config-tests/01-deploy-with-default-config-overwrite.yaml b/test/kuttl/nova/test-suites/default/config-tests/01-deploy-with-default-config-overwrite.yaml similarity index 100% rename from test/kuttl/test-suites/default/config-tests/01-deploy-with-default-config-overwrite.yaml rename to test/kuttl/nova/test-suites/default/config-tests/01-deploy-with-default-config-overwrite.yaml 
diff --git a/test/kuttl/test-suites/default/config-tests/02-assert.yaml b/test/kuttl/nova/test-suites/default/config-tests/02-assert.yaml similarity index 100% rename from test/kuttl/test-suites/default/config-tests/02-assert.yaml rename to test/kuttl/nova/test-suites/default/config-tests/02-assert.yaml diff --git a/test/kuttl/test-suites/default/config-tests/02-enable-notifications.yaml b/test/kuttl/nova/test-suites/default/config-tests/02-enable-notifications.yaml similarity index 100% rename from test/kuttl/test-suites/default/config-tests/02-enable-notifications.yaml rename to test/kuttl/nova/test-suites/default/config-tests/02-enable-notifications.yaml diff --git a/test/kuttl/test-suites/default/config-tests/03-cleanup-nova.yaml b/test/kuttl/nova/test-suites/default/config-tests/03-cleanup-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/config-tests/03-cleanup-nova.yaml rename to test/kuttl/nova/test-suites/default/config-tests/03-cleanup-nova.yaml diff --git a/test/kuttl/test-suites/default/config.yaml b/test/kuttl/nova/test-suites/default/config.yaml similarity index 100% rename from test/kuttl/test-suites/default/config.yaml rename to test/kuttl/nova/test-suites/default/config.yaml diff --git a/test/kuttl/test-suites/default/deps/OpenStackControlPlane.yaml b/test/kuttl/nova/test-suites/default/deps/OpenStackControlPlane.yaml similarity index 100% rename from test/kuttl/test-suites/default/deps/OpenStackControlPlane.yaml rename to test/kuttl/nova/test-suites/default/deps/OpenStackControlPlane.yaml diff --git a/test/kuttl/test-suites/default/deps/infra.yaml b/test/kuttl/nova/test-suites/default/deps/infra.yaml similarity index 100% rename from test/kuttl/test-suites/default/deps/infra.yaml rename to test/kuttl/nova/test-suites/default/deps/infra.yaml diff --git a/test/kuttl/test-suites/default/deps/keystone.yaml b/test/kuttl/nova/test-suites/default/deps/keystone.yaml similarity index 100% rename from 
test/kuttl/test-suites/default/deps/keystone.yaml rename to test/kuttl/nova/test-suites/default/deps/keystone.yaml diff --git a/test/kuttl/test-suites/default/deps/kustomization.yaml b/test/kuttl/nova/test-suites/default/deps/kustomization.yaml similarity index 100% rename from test/kuttl/test-suites/default/deps/kustomization.yaml rename to test/kuttl/nova/test-suites/default/deps/kustomization.yaml diff --git a/test/kuttl/test-suites/default/deps/namespace.yaml b/test/kuttl/nova/test-suites/default/deps/namespace.yaml similarity index 100% rename from test/kuttl/test-suites/default/deps/namespace.yaml rename to test/kuttl/nova/test-suites/default/deps/namespace.yaml diff --git a/test/kuttl/test-suites/default/deps/nova.yaml b/test/kuttl/nova/test-suites/default/deps/nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/deps/nova.yaml rename to test/kuttl/nova/test-suites/default/deps/nova.yaml diff --git a/test/kuttl/test-suites/default/deps/placement.yaml b/test/kuttl/nova/test-suites/default/deps/placement.yaml similarity index 100% rename from test/kuttl/test-suites/default/deps/placement.yaml rename to test/kuttl/nova/test-suites/default/deps/placement.yaml diff --git a/test/kuttl/test-suites/default/output/.keep b/test/kuttl/nova/test-suites/default/output/.keep similarity index 100% rename from test/kuttl/test-suites/default/output/.keep rename to test/kuttl/nova/test-suites/default/output/.keep diff --git a/test/kuttl/test-suites/default/scale-tests/00-cleanup-nova.yaml b/test/kuttl/nova/test-suites/default/scale-tests/00-cleanup-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/scale-tests/00-cleanup-nova.yaml rename to test/kuttl/nova/test-suites/default/scale-tests/00-cleanup-nova.yaml diff --git a/test/kuttl/test-suites/default/scale-tests/01-assert.yaml b/test/kuttl/nova/test-suites/default/scale-tests/01-assert.yaml similarity index 100% rename from 
test/kuttl/test-suites/default/scale-tests/01-assert.yaml rename to test/kuttl/nova/test-suites/default/scale-tests/01-assert.yaml diff --git a/test/kuttl/test-suites/default/scale-tests/01-deploy.yaml b/test/kuttl/nova/test-suites/default/scale-tests/01-deploy.yaml similarity index 100% rename from test/kuttl/test-suites/default/scale-tests/01-deploy.yaml rename to test/kuttl/nova/test-suites/default/scale-tests/01-deploy.yaml diff --git a/test/kuttl/test-suites/default/scale-tests/02-assert.yaml b/test/kuttl/nova/test-suites/default/scale-tests/02-assert.yaml similarity index 100% rename from test/kuttl/test-suites/default/scale-tests/02-assert.yaml rename to test/kuttl/nova/test-suites/default/scale-tests/02-assert.yaml diff --git a/test/kuttl/test-suites/default/scale-tests/02-scale-up-nova.yaml b/test/kuttl/nova/test-suites/default/scale-tests/02-scale-up-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/scale-tests/02-scale-up-nova.yaml rename to test/kuttl/nova/test-suites/default/scale-tests/02-scale-up-nova.yaml diff --git a/test/kuttl/test-suites/default/scale-tests/03-assert.yaml b/test/kuttl/nova/test-suites/default/scale-tests/03-assert.yaml similarity index 100% rename from test/kuttl/test-suites/default/scale-tests/03-assert.yaml rename to test/kuttl/nova/test-suites/default/scale-tests/03-assert.yaml diff --git a/test/kuttl/test-suites/default/scale-tests/03-scale-down-nova.yaml b/test/kuttl/nova/test-suites/default/scale-tests/03-scale-down-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/scale-tests/03-scale-down-nova.yaml rename to test/kuttl/nova/test-suites/default/scale-tests/03-scale-down-nova.yaml diff --git a/test/kuttl/test-suites/default/scale-tests/04-assert.yaml b/test/kuttl/nova/test-suites/default/scale-tests/04-assert.yaml similarity index 100% rename from test/kuttl/test-suites/default/scale-tests/04-assert.yaml rename to 
test/kuttl/nova/test-suites/default/scale-tests/04-assert.yaml diff --git a/test/kuttl/test-suites/default/scale-tests/04-scale-down-zero-nova.yaml b/test/kuttl/nova/test-suites/default/scale-tests/04-scale-down-zero-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/scale-tests/04-scale-down-zero-nova.yaml rename to test/kuttl/nova/test-suites/default/scale-tests/04-scale-down-zero-nova.yaml diff --git a/test/kuttl/test-suites/default/scale-tests/05-cleanup-nova.yaml b/test/kuttl/nova/test-suites/default/scale-tests/05-cleanup-nova.yaml similarity index 100% rename from test/kuttl/test-suites/default/scale-tests/05-cleanup-nova.yaml rename to test/kuttl/nova/test-suites/default/scale-tests/05-cleanup-nova.yaml From a422847a4bfbafb5d6ce9f2da25245078a922431 Mon Sep 17 00:00:00 2001 From: Rajesh Tailor Date: Tue, 15 Jul 2025 11:32:04 +0530 Subject: [PATCH 2/2] Merge placement-operator codebase into nova-operator This commit merges the complete placement-operator codebase into nova-operator, building on the previous multi-group restructuring. Added components: - Placement API definitions under apis/placement/v1beta1 - Placement controller in controllers/placement - Placement package in pkg/placement/ with deployment and database sync - Configuration templates for placement services - RBAC roles and CRD manifests for placement resources - Functional and KUTTL tests for placement functionality - Updated main.go and PROJECT file to support placement group This merge enables nova-operator to manage both Nova and Placement services within a unified multi-group operator structure.
Commit message assisted by: claude-4-sonnet Related: OSPRH-14957 --- Makefile | 2 +- PROJECT | 13 + ...placement.openstack.org_placementapis.yaml | 490 ++++++ apis/go.mod | 10 +- apis/go.sum | 1 - apis/placement/v1beta1/groupversion_info.go | 36 + apis/placement/v1beta1/placementapi_types.go | 246 +++ .../placement/v1beta1/placementapi_webhook.go | 213 +++ apis/placement/v1beta1/webhook_suite_test.go | 146 ++ .../v1beta1/zz_generated.deepcopy.go | 257 +++ ...placement.openstack.org_placementapis.yaml | 490 ++++++ config/crd/kustomization.yaml | 3 + ...ainjection_in_placement_placementapis.yaml | 7 + .../webhook_in_placement_placementapis.yaml | 16 + config/default/manager_default_images.yaml | 2 + .../nova-operator.clusterserviceversion.yaml | 9 + .../placement_placementapi_editor_role.yaml | 31 + .../placement_placementapi_viewer_role.yaml | 27 + config/rbac/role.yaml | 51 + config/samples/kustomization.yaml | 1 + .../placement_v1beta1_placementapi.yaml | 12 + config/webhook/manifests.yaml | 40 + .../placement/placementapi_controller.go | 1439 ++++++++++++++++ controllers/placement/suite_test.go | 80 + hack/clean_local_webhook.sh | 2 + hack/run_with_local_webhook.sh | 56 + main.go | 15 + pkg/placement/const.go | 37 + pkg/placement/dbsync.go | 91 ++ pkg/placement/deployment.go | 186 +++ pkg/placement/volumes.go | 81 + templates/placementapi/config/httpd.conf | 81 + .../config/placement-api-config.json | 70 + .../config/placement-dbsync-config.json | 23 + templates/placementapi/config/placement.conf | 29 + templates/placementapi/config/ssl.conf | 21 + test/functional/placement/base_test.go | 220 +++ .../placement/placementapi_controller_test.go | 1441 +++++++++++++++++ .../placement/placementapi_webhook_test.go | 219 +++ test/functional/placement/suite_test.go | 240 +++ .../common/assert_sample_deployment.yaml | 287 ++++ .../placement/common/cleanup-placement.yaml | 6 + .../common/errors_cleanup_placement.yaml | 94 ++ .../common/patch_placement_deploy.yaml | 5 + 
.../placement/common/placementapi_deploy.yaml | 1 + .../placement/common/tls_certificates.yaml | 31 + .../go_templates/apiEndpoints.gotemplate | 1 + test/kuttl/placement/lib/helper_functions.sh | 13 + .../00-cleanup-placement.yaml | 1 + .../tests/placement_deploy_tls/01-assert.yaml | 14 + .../01-tls_certificates.yaml | 1 + .../02-placementapi_deploy_tls.yaml | 1 + .../tests/placement_deploy_tls/03-assert.yaml | 217 +++ .../03-patch_placement_deploy.yaml | 1 + .../04-cleanup-placement.yaml | 1 + .../tests/placement_deploy_tls/04-errors.yaml | 1 + .../placement_scale/00-cleanup-placement.yaml | 1 + .../placement_scale/01-deploy_placement.yaml | 1 + .../tests/placement_scale/02-assert.yaml | 1 + .../02-patch_placement_deploy.yaml | 1 + .../tests/placement_scale/03-assert.yaml | 26 + .../03-scale-placementapi.yaml | 5 + .../tests/placement_scale/04-assert.yaml | 26 + .../04-scale-down-placementapi.yaml | 5 + .../tests/placement_scale/05-assert.yaml | 23 + .../05-scale-down-zero-placementapi.yaml | 5 + .../placement_scale/06-cleanup-placement.yaml | 1 + .../tests/placement_scale/06-errors.yaml | 1 + 68 files changed, 7202 insertions(+), 3 deletions(-) create mode 100644 apis/bases/placement.openstack.org_placementapis.yaml create mode 100644 apis/placement/v1beta1/groupversion_info.go create mode 100644 apis/placement/v1beta1/placementapi_types.go create mode 100644 apis/placement/v1beta1/placementapi_webhook.go create mode 100644 apis/placement/v1beta1/webhook_suite_test.go create mode 100644 apis/placement/v1beta1/zz_generated.deepcopy.go create mode 100644 config/crd/bases/placement.openstack.org_placementapis.yaml create mode 100644 config/crd/patches/cainjection_in_placement_placementapis.yaml create mode 100644 config/crd/patches/webhook_in_placement_placementapis.yaml create mode 100644 config/rbac/placement_placementapi_editor_role.yaml create mode 100644 config/rbac/placement_placementapi_viewer_role.yaml create mode 100644 
config/samples/placement_v1beta1_placementapi.yaml create mode 100644 controllers/placement/placementapi_controller.go create mode 100644 controllers/placement/suite_test.go create mode 100644 pkg/placement/const.go create mode 100644 pkg/placement/dbsync.go create mode 100644 pkg/placement/deployment.go create mode 100644 pkg/placement/volumes.go create mode 100644 templates/placementapi/config/httpd.conf create mode 100644 templates/placementapi/config/placement-api-config.json create mode 100644 templates/placementapi/config/placement-dbsync-config.json create mode 100644 templates/placementapi/config/placement.conf create mode 100644 templates/placementapi/config/ssl.conf create mode 100644 test/functional/placement/base_test.go create mode 100644 test/functional/placement/placementapi_controller_test.go create mode 100644 test/functional/placement/placementapi_webhook_test.go create mode 100644 test/functional/placement/suite_test.go create mode 100644 test/kuttl/placement/common/assert_sample_deployment.yaml create mode 100644 test/kuttl/placement/common/cleanup-placement.yaml create mode 100644 test/kuttl/placement/common/errors_cleanup_placement.yaml create mode 100644 test/kuttl/placement/common/patch_placement_deploy.yaml create mode 120000 test/kuttl/placement/common/placementapi_deploy.yaml create mode 100644 test/kuttl/placement/common/tls_certificates.yaml create mode 100644 test/kuttl/placement/go_templates/apiEndpoints.gotemplate create mode 100755 test/kuttl/placement/lib/helper_functions.sh create mode 120000 test/kuttl/placement/tests/placement_deploy_tls/00-cleanup-placement.yaml create mode 100644 test/kuttl/placement/tests/placement_deploy_tls/01-assert.yaml create mode 120000 test/kuttl/placement/tests/placement_deploy_tls/01-tls_certificates.yaml create mode 120000 test/kuttl/placement/tests/placement_deploy_tls/02-placementapi_deploy_tls.yaml create mode 100644 test/kuttl/placement/tests/placement_deploy_tls/03-assert.yaml create mode 
120000 test/kuttl/placement/tests/placement_deploy_tls/03-patch_placement_deploy.yaml create mode 120000 test/kuttl/placement/tests/placement_deploy_tls/04-cleanup-placement.yaml create mode 120000 test/kuttl/placement/tests/placement_deploy_tls/04-errors.yaml create mode 120000 test/kuttl/placement/tests/placement_scale/00-cleanup-placement.yaml create mode 120000 test/kuttl/placement/tests/placement_scale/01-deploy_placement.yaml create mode 120000 test/kuttl/placement/tests/placement_scale/02-assert.yaml create mode 120000 test/kuttl/placement/tests/placement_scale/02-patch_placement_deploy.yaml create mode 100644 test/kuttl/placement/tests/placement_scale/03-assert.yaml create mode 100644 test/kuttl/placement/tests/placement_scale/03-scale-placementapi.yaml create mode 100644 test/kuttl/placement/tests/placement_scale/04-assert.yaml create mode 100644 test/kuttl/placement/tests/placement_scale/04-scale-down-placementapi.yaml create mode 100644 test/kuttl/placement/tests/placement_scale/05-assert.yaml create mode 100644 test/kuttl/placement/tests/placement_scale/05-scale-down-zero-placementapi.yaml create mode 120000 test/kuttl/placement/tests/placement_scale/06-cleanup-placement.yaml create mode 120000 test/kuttl/placement/tests/placement_scale/06-errors.yaml diff --git a/Makefile b/Makefile index ee7b8a4b2..16848d075 100644 --- a/Makefile +++ b/Makefile @@ -147,7 +147,7 @@ PROC_CMD = --procs ${PROCS} test: manifests generate fmt vet envtest ginkgo ## Run tests. KUBEBUILDER_ASSETS="$(shell $(ENVTEST) -v debug --bin-dir $(LOCALBIN) use $(ENVTEST_K8S_VERSION) -p path)" \ OPERATOR_TEMPLATES="$(PWD)/templates" \ - $(GINKGO) --trace --cover --coverpkg=../../pkg/...,../../controllers,../../apis/nova/v1beta1 --coverprofile cover.out --covermode=atomic --randomize-all ${PROC_CMD} $(GINKGO_ARGS) ./test/... 
+ $(GINKGO) --trace --cover --coverpkg=../../pkg/...,../../controllers,../../apis/nova/v1beta1,../../apis/placement/v1beta1 --coverprofile cover.out --covermode=atomic --randomize-all ${PROC_CMD} $(GINKGO_ARGS) ./test/... ##@ Build diff --git a/PROJECT b/PROJECT index e09f55324..8bbfe1fbe 100644 --- a/PROJECT +++ b/PROJECT @@ -116,4 +116,17 @@ resources: defaulting: true validation: true webhookVersion: v1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: openstack.org + group: placement + kind: PlacementAPI + path: github.com/openstack-k8s-operators/nova-operator/apis/placement/v1beta1 + version: v1beta1 + webhooks: + defaulting: true + validation: true + webhookVersion: v1 version: "3" diff --git a/apis/bases/placement.openstack.org_placementapis.yaml b/apis/bases/placement.openstack.org_placementapis.yaml new file mode 100644 index 000000000..a9f2c0281 --- /dev/null +++ b/apis/bases/placement.openstack.org_placementapis.yaml @@ -0,0 +1,490 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: placementapis.placement.openstack.org +spec: + group: placement.openstack.org + names: + kind: PlacementAPI + listKind: PlacementAPIList + plural: placementapis + singular: placementapi + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: NetworkAttachments + jsonPath: .spec.networkAttachments + name: NetworkAttachments + type: string + - description: Status + jsonPath: .status.conditions[0].status + name: Status + type: string + - description: Message + jsonPath: .status.conditions[0].message + name: Message + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: PlacementAPI is the Schema for the placementapis API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PlacementAPISpec defines the desired state of PlacementAPI + properties: + apiTimeout: + default: 60 + description: APITimeout for HAProxy, Apache + minimum: 10 + type: integer + containerImage: + description: PlacementAPI Container Image URL (will be set to environmental + default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as custom.conf file. + type: string + databaseAccount: + default: placement + description: DatabaseAccount - name of MariaDBAccount which will be + used to connect. + type: string + databaseInstance: + description: |- + MariaDB instance name + Right now required by the maridb-operator to get the credentials from the instance to create the DB + Might not be required in future + type: string + defaultConfigOverwrite: + additionalProperties: + type: string + description: DefaultConfigOverwrite - interface to overwrite default + config files like policy.yaml. 
+ type: object + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment resource + names to expose the services to the given network + items: + type: string + type: array + nodeSelector: + additionalProperties: + type: string + description: NodeSelector to target subset of worker nodes running + this service + type: object + override: + description: Override, provides the ability to override the generated + manifest of several child resources. + properties: + service: + additionalProperties: + description: |- + RoutedOverrideSpec - a routed service override configuration for the Service created to serve traffic + to the cluster. Allows for the manifest of the created Service to be overwritten with custom configuration. + properties: + endpointURL: + type: string + metadata: + description: |- + EmbeddedLabelsAnnotations is an embedded subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta. + Only labels and annotations are included. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. 
+ More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + spec: + description: |- + OverrideServiceSpec is a subset of the fields included in https://pkg.go.dev/k8s.io/api@v0.26.6/core/v1#ServiceSpec + Limited to Type, SessionAffinity, LoadBalancerSourceRanges, ExternalName, ExternalTrafficPolicy, SessionAffinityConfig, + IPFamilyPolicy, LoadBalancerClass and InternalTrafficPolicy + properties: + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. 
If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. + type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. 
+ type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. + properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. 
+ "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + description: |- + Override configuration for the Service created to serve traffic to the cluster. + The key must be the endpoint type (public, internal) + type: object + type: object + passwordSelectors: + default: + service: PlacementPassword + description: PasswordSelectors - Selectors to identify the DB and + ServiceUser password from the Secret + properties: + service: + default: PlacementPassword + description: Service - Selector to get the service user password + from the Secret + type: string + type: object + preserveJobs: + default: false + description: PreserveJobs - do not delete jobs after they finished + e.g. to check logs + type: boolean + replicas: + default: 1 + description: Replicas of placement API to run + format: int32 + maximum: 32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. 
+ properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + secret: + description: Secret containing OpenStack password information for + placement PlacementPassword + type: string + serviceUser: + default: placement + description: ServiceUser - optional username used for this service + to register in keystone + type: string + tls: + description: TLS - Parameters related to the TLS + properties: + api: + description: API tls type which encapsulates for API services + properties: + internal: + description: Internal GenericService - holds the secret for + the internal endpoint + properties: + secretName: + description: SecretName - holding the cert, key for the + service + type: string + type: object + public: + description: Public GenericService - holds the secret for + the public endpoint + properties: + secretName: + description: SecretName - holding the cert, key for the + service + type: string + type: object + type: object + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs in a pre-created + bundle file + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. 
Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + required: + - containerImage + - databaseInstance + - secret + type: object + status: + description: PlacementAPIStatus defines the observed state of PlacementAPI + properties: + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. + type: string + severity: + description: |- + Severity provides a classification of Reason code, so the current situation is immediately + understandable and could act accordingly. + It is meant for situations where Status=False and it should be indicated if it is just + informational, warning (next reconciliation might fix it) or an error (e.g. DB create issue + and no actions to automatically resolve the issue can/should be done). + For conditions where Status=Unknown or Status=True the Severity should be SeverityNone. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. + type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + databaseHostname: + description: Placement Database Hostname + type: string + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. 
job status + type: object + lastAppliedTopology: + description: LastAppliedTopology - the last applied Topology + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + networkAttachments: + additionalProperties: + items: + type: string + type: array + description: NetworkAttachments status of the deployment pods + type: object + observedGeneration: + description: ObservedGeneration - the most recent generation observed + for this service. If the observed generation is less than the spec + generation, then the controller has not processed the latest changes. + format: int64 + type: integer + readyCount: + description: ReadyCount of placement API instances + format: int32 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/apis/go.mod b/apis/go.mod index dabf6468e..44db0dfc4 100644 --- a/apis/go.mod +++ b/apis/go.mod @@ -4,11 +4,14 @@ go 1.21 require ( github.com/google/go-cmp v0.7.0 + github.com/onsi/ginkgo/v2 v2.20.1 + github.com/onsi/gomega v1.34.1 github.com/openstack-k8s-operators/infra-operator/apis v0.6.1-0.20250813063935-fdc20530dcf1 github.com/openstack-k8s-operators/lib-common/modules/common v0.6.1-0.20250730071847-837b07f8d72f github.com/robfig/cron/v3 v3.0.1 k8s.io/api v0.29.15 k8s.io/apimachinery v0.29.15 + k8s.io/client-go v0.29.15 k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 sigs.k8s.io/controller-runtime v0.17.6 ) @@ -21,14 +24,17 @@ require ( github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/go-logr/logr v1.4.3 // 
indirect + github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect github.com/google/uuid v1.6.0 // indirect github.com/imdario/mergo v0.3.16 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -43,6 +49,8 @@ require ( github.com/prometheus/common v0.51.1 // indirect github.com/prometheus/procfs v0.13.0 // indirect github.com/spf13/pflag v1.0.5 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect golang.org/x/net v0.28.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect @@ -50,6 +58,7 @@ require ( golang.org/x/term v0.23.0 // indirect golang.org/x/text v0.17.0 // indirect golang.org/x/time v0.5.0 // indirect + golang.org/x/tools v0.24.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/protobuf v1.34.1 // indirect @@ -57,7 +66,6 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.29.15 // indirect - k8s.io/client-go v0.29.15 // indirect k8s.io/component-base v0.29.15 // indirect k8s.io/klog/v2 v2.120.1 // indirect k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 // indirect diff --git a/apis/go.sum b/apis/go.sum index 48f36439a..59187ff4b 100644 --- a/apis/go.sum +++ b/apis/go.sum @@ -24,7 +24,6 @@ github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF 
github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= diff --git a/apis/placement/v1beta1/groupversion_info.go b/apis/placement/v1beta1/groupversion_info.go new file mode 100644 index 000000000..4f0c92c13 --- /dev/null +++ b/apis/placement/v1beta1/groupversion_info.go @@ -0,0 +1,36 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1beta1 contains API Schema definitions for the placement v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=placement.openstack.org +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "placement.openstack.org", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/apis/placement/v1beta1/placementapi_types.go b/apis/placement/v1beta1/placementapi_types.go new file mode 100644 index 000000000..2dd869afb --- /dev/null +++ b/apis/placement/v1beta1/placementapi_types.go @@ -0,0 +1,246 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/service" + "github.com/openstack-k8s-operators/lib-common/modules/common/tls" + "github.com/openstack-k8s-operators/lib-common/modules/common/util" + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + "k8s.io/apimachinery/pkg/util/validation/field" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + // DbSyncHash hash + DbSyncHash = "dbsync" + + // DeploymentHash hash used to detect changes + DeploymentHash = "deployment" + + // Container image fall-back defaults + + // PlacementAPIContainerImage is the fall-back container image for PlacementAPI + PlacementAPIContainerImage = "quay.io/podified-antelope-centos9/openstack-placement-api:current-podified" +) + +// PlacementAPISpec defines the desired state of PlacementAPI +type PlacementAPISpec struct { + PlacementAPISpecCore `json:",inline"` + + // +kubebuilder:validation:Required + // PlacementAPI Container Image URL (will be set to environmental default if empty) + ContainerImage string `json:"containerImage"` +} + +// PlacementAPISpecCore - +type PlacementAPISpecCore struct { + // +kubebuilder:validation:Optional + // +kubebuilder:default=60 + // +kubebuilder:validation:Minimum=10 + // APITimeout for HAProxy, Apache + APITimeout int `json:"apiTimeout"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=placement + // ServiceUser - optional username used for this service to register in keystone + ServiceUser string `json:"serviceUser"` + + // +kubebuilder:validation:Required + // MariaDB instance name + // Right now required by the maridb-operator to get the credentials from the instance to create the DB + // Might not be required in future + DatabaseInstance string `json:"databaseInstance"` + + // +kubebuilder:validation:Optional + // 
+kubebuilder:default=placement + // DatabaseAccount - name of MariaDBAccount which will be used to connect. + DatabaseAccount string `json:"databaseAccount"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=1 + // +kubebuilder:validation:Maximum=32 + // +kubebuilder:validation:Minimum=0 + // Replicas of placement API to run + Replicas *int32 `json:"replicas"` + + // +kubebuilder:validation:Required + // Secret containing OpenStack password information for placement PlacementPassword + Secret string `json:"secret"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default={service: PlacementPassword} + // PasswordSelectors - Selectors to identify the DB and ServiceUser password from the Secret + PasswordSelectors PasswordSelector `json:"passwordSelectors"` + + // +kubebuilder:validation:Optional + // NodeSelector to target subset of worker nodes running this service + NodeSelector *map[string]string `json:"nodeSelector,omitempty"` + + // +kubebuilder:validation:Optional + // +kubebuilder:default=false + // PreserveJobs - do not delete jobs after they finished e.g. to check logs + PreserveJobs bool `json:"preserveJobs"` + + // +kubebuilder:validation:Optional + // CustomServiceConfig - customize the service config using this parameter to change service defaults, + // or overwrite rendered information using raw OpenStack config format. The content gets added to + // to /etc//.conf.d directory as custom.conf file. + CustomServiceConfig string `json:"customServiceConfig"` + + // +kubebuilder:validation:Optional + // DefaultConfigOverwrite - interface to overwrite default config files like policy.yaml. + DefaultConfigOverwrite map[string]string `json:"defaultConfigOverwrite,omitempty"` + + // +kubebuilder:validation:Optional + // Resources - Compute Resources required by this service (Limits/Requests). 
+ // https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + Resources corev1.ResourceRequirements `json:"resources,omitempty"` + + // +kubebuilder:validation:Optional + // NetworkAttachments is a list of NetworkAttachment resource names to expose the services to the given network + NetworkAttachments []string `json:"networkAttachments,omitempty"` + + // +kubebuilder:validation:Optional + // Override, provides the ability to override the generated manifest of several child resources. + Override APIOverrideSpec `json:"override,omitempty"` + + // +kubebuilder:validation:Optional + // +operator-sdk:csv:customresourcedefinitions:type=spec + // TLS - Parameters related to the TLS + TLS tls.API `json:"tls,omitempty"` + + // +kubebuilder:validation:Optional + // TopologyRef to apply the Topology defined by the associated CR referenced + // by name + TopologyRef *topologyv1.TopoRef `json:"topologyRef,omitempty"` +} + +// APIOverrideSpec to override the generated manifest of several child resources. +type APIOverrideSpec struct { + // Override configuration for the Service created to serve traffic to the cluster. + // The key must be the endpoint type (public, internal) + Service map[service.Endpoint]service.RoutedOverrideSpec `json:"service,omitempty"` +} + +// PasswordSelector to identify the DB and AdminUser password from the Secret +type PasswordSelector struct { + // +kubebuilder:validation:Optional + // +kubebuilder:default="PlacementPassword" + // Service - Selector to get the service user password from the Secret + Service string `json:"service"` +} + +// PlacementAPIStatus defines the observed state of PlacementAPI +type PlacementAPIStatus struct { + // ReadyCount of placement API instances + ReadyCount int32 `json:"readyCount,omitempty"` + + // Map of hashes to track e.g. 
job status + Hash map[string]string `json:"hash,omitempty"` + + // Conditions + Conditions condition.Conditions `json:"conditions,omitempty" optional:"true"` + + // Placement Database Hostname + DatabaseHostname string `json:"databaseHostname,omitempty"` + + // NetworkAttachments status of the deployment pods + NetworkAttachments map[string][]string `json:"networkAttachments,omitempty"` + + //ObservedGeneration - the most recent generation observed for this service. If the observed generation is less than the spec generation, then the controller has not processed the latest changes. + ObservedGeneration int64 `json:"observedGeneration,omitempty"` + + // LastAppliedTopology - the last applied Topology + LastAppliedTopology *topologyv1.TopoRef `json:"lastAppliedTopology,omitempty"` +} + +// PlacementAPI is the Schema for the placementapis API +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +// +kubebuilder:printcolumn:name="NetworkAttachments",type="string",JSONPath=".spec.networkAttachments",description="NetworkAttachments" +// +kubebuilder:printcolumn:name="Status",type="string",JSONPath=".status.conditions[0].status",description="Status" +// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[0].message",description="Message" +type PlacementAPI struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec PlacementAPISpec `json:"spec,omitempty"` + Status PlacementAPIStatus `json:"status,omitempty"` +} + +// PlacementAPIList contains a list of PlacementAPI +// +kubebuilder:object:root=true +type PlacementAPIList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []PlacementAPI `json:"items"` +} + +func init() { + SchemeBuilder.Register(&PlacementAPI{}, &PlacementAPIList{}) +} + +// IsReady - returns true if PlacementAPI is reconciled successfully +func (instance PlacementAPI) IsReady() bool { + return 
instance.Status.Conditions.IsTrue(condition.ReadyCondition)
+}
+
+// RbacConditionsSet - set the conditions for the rbac object
+func (instance PlacementAPI) RbacConditionsSet(c *condition.Condition) {
+	instance.Status.Conditions.Set(c)
+}
+
+// RbacNamespace - return the namespace
+func (instance PlacementAPI) RbacNamespace() string {
+	return instance.Namespace
+}
+
+// RbacResourceName - return the name to be used for rbac objects (serviceaccount, role, rolebinding)
+func (instance PlacementAPI) RbacResourceName() string {
+	return "placement-" + instance.Name
+}
+
+// SetupDefaults - initializes any CRD field defaults based on environment variables (the defaulting mechanism itself is implemented via webhooks)
+func SetupDefaults() {
+	// Acquire environmental defaults and initialize Placement defaults with them
+	placementDefaults := PlacementAPIDefaults{
+		ContainerImageURL: util.GetEnvVar("RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT", PlacementAPIContainerImage),
+		APITimeout:        60,
+	}
+
+	SetupPlacementAPIDefaults(placementDefaults)
+}
+
+// GetSecret returns the value of the PlacementAPI.Spec.Secret
+func (instance PlacementAPI) GetSecret() string {
+	return instance.Spec.Secret
+}
+
+// ValidateTopology -
+func (instance *PlacementAPISpecCore) ValidateTopology(
+	basePath *field.Path,
+	namespace string,
+) field.ErrorList {
+	var allErrs field.ErrorList
+	allErrs = append(allErrs, topologyv1.ValidateTopologyRef(
+		instance.TopologyRef,
+		*basePath.Child("topologyRef"), namespace)...)
+	return allErrs
+}
diff --git a/apis/placement/v1beta1/placementapi_webhook.go b/apis/placement/v1beta1/placementapi_webhook.go
new file mode 100644
index 000000000..43b0a54d2
--- /dev/null
+++ b/apis/placement/v1beta1/placementapi_webhook.go
@@ -0,0 +1,213 @@
+/*
+Copyright 2022.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Generated by: +// +// operator-sdk create webhook --group placement --version v1beta1 --kind PlacementAPI --programmatic-validation --defaulting +// + +package v1beta1 + +import ( + "fmt" + + "github.com/openstack-k8s-operators/lib-common/modules/common/service" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" + ctrl "sigs.k8s.io/controller-runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" +) + +// PlacementAPIDefaults - +type PlacementAPIDefaults struct { + ContainerImageURL string + APITimeout int +} + +var placementAPIDefaults PlacementAPIDefaults + +// log is for logging in this package. +var placementapilog = logf.Log.WithName("placementapi-resource") + +// SetupPlacementAPIDefaults - initialize PlacementAPI spec defaults for use with either internal or external webhooks +func SetupPlacementAPIDefaults(defaults PlacementAPIDefaults) { + placementAPIDefaults = defaults + placementapilog.Info("PlacementAPI defaults initialized", "defaults", defaults) +} + +// SetupWebhookWithManager sets up the webhook with the Manager +func (r *PlacementAPI) SetupWebhookWithManager(mgr ctrl.Manager) error { + return ctrl.NewWebhookManagedBy(mgr). + For(r). 
+ Complete() +} + +//+kubebuilder:webhook:path=/mutate-placement-openstack-org-v1beta1-placementapi,mutating=true,failurePolicy=fail,sideEffects=None,groups=placement.openstack.org,resources=placementapis,verbs=create;update,versions=v1beta1,name=mplacementapi.kb.io,admissionReviewVersions=v1 + +var _ webhook.Defaulter = &PlacementAPI{} + +// Default implements webhook.Defaulter so a webhook will be registered for the type +func (r *PlacementAPI) Default() { + placementapilog.Info("default", "name", r.Name) + + r.Spec.Default() +} + +// Default - set defaults for this PlacementAPI spec +func (spec *PlacementAPISpec) Default() { + if spec.ContainerImage == "" { + spec.ContainerImage = placementAPIDefaults.ContainerImageURL + } + if spec.APITimeout == 0 { + spec.APITimeout = placementAPIDefaults.APITimeout + } + +} + +// Default - set defaults for this PlacementAPI core spec (this version is used by the OpenStackControlplane webhook) +func (spec *PlacementAPISpecCore) Default() { + // nothing here yet +} + +// TODO(user): change verbs to "verbs=create;update;delete" if you want to enable deletion validation. 
+//+kubebuilder:webhook:path=/validate-placement-openstack-org-v1beta1-placementapi,mutating=false,failurePolicy=fail,sideEffects=None,groups=placement.openstack.org,resources=placementapis,verbs=create;update,versions=v1beta1,name=vplacementapi.kb.io,admissionReviewVersions=v1 + +var _ webhook.Validator = &PlacementAPI{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *PlacementAPI) ValidateCreate() (admission.Warnings, error) { + placementapilog.Info("validate create", "name", r.Name) + + errors := r.Spec.ValidateCreate(field.NewPath("spec"), r.Namespace) + if len(errors) != 0 { + placementapilog.Info("validation failed", "name", r.Name) + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "placement.openstack.org", Kind: "PlacementAPI"}, + r.Name, errors) + } + return nil, nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *PlacementAPI) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { + placementapilog.Info("validate update", "name", r.Name) + oldPlacement, ok := old.(*PlacementAPI) + if !ok || oldPlacement == nil { + return nil, apierrors.NewInternalError(fmt.Errorf("unable to convert existing object")) + } + + errors := r.Spec.ValidateUpdate(oldPlacement.Spec, field.NewPath("spec"), r.Namespace) + if len(errors) != 0 { + placementapilog.Info("validation failed", "name", r.Name) + return nil, apierrors.NewInvalid( + schema.GroupKind{Group: "placement.openstack.org", Kind: "PlacementAPI"}, + r.Name, errors) + } + return nil, nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *PlacementAPI) ValidateDelete() (admission.Warnings, error) { + placementapilog.Info("validate delete", "name", r.Name) + + // TODO(user): fill in your validation logic upon object deletion. 
+	return nil, nil
+}
+
+func (r PlacementAPISpec) ValidateCreate(basePath *field.Path, namespace string) field.ErrorList {
+	return r.PlacementAPISpecCore.ValidateCreate(basePath, namespace)
+}
+
+func (r PlacementAPISpec) ValidateUpdate(old PlacementAPISpec, basePath *field.Path, namespace string) field.ErrorList {
+	return r.PlacementAPISpecCore.ValidateUpdate(old.PlacementAPISpecCore, basePath, namespace)
+}
+
+func (r PlacementAPISpecCore) ValidateCreate(basePath *field.Path, namespace string) field.ErrorList {
+	var allErrs field.ErrorList
+
+	// validate the service override key is valid
+	allErrs = append(allErrs, service.ValidateRoutedOverrides(basePath.Child("override").Child("service"), r.Override.Service)...)
+
+	allErrs = append(allErrs, ValidateDefaultConfigOverwrite(basePath, r.DefaultConfigOverwrite)...)
+
+	// When a TopologyRef CR is referenced, fail if a different Namespace is
+	// referenced because it is not supported
+	allErrs = append(allErrs, r.ValidateTopology(basePath, namespace)...)
+
+	return allErrs
+}
+
+func (r PlacementAPISpecCore) ValidateUpdate(old PlacementAPISpecCore, basePath *field.Path, namespace string) field.ErrorList {
+	var allErrs field.ErrorList
+
+	// validate the service override key is valid
+	allErrs = append(allErrs, service.ValidateRoutedOverrides(basePath.Child("override").Child("service"), r.Override.Service)...)
+
+	allErrs = append(allErrs, ValidateDefaultConfigOverwrite(basePath, r.DefaultConfigOverwrite)...)
+
+	// When a TopologyRef CR is referenced, fail if a different Namespace is
+	// referenced because it is not supported
+	allErrs = append(allErrs, r.ValidateTopology(basePath, namespace)...)
+
+	return allErrs
+}
+
+func ValidateDefaultConfigOverwrite(
+	basePath *field.Path,
+	validateConfigOverwrite map[string]string,
+) field.ErrorList {
+	var errors field.ErrorList
+	for requested := range validateConfigOverwrite {
+		if requested != "policy.yaml" {
+			errors = append(
+				errors,
+				field.Invalid(
+					basePath.Child("defaultConfigOverwrite"),
+					requested,
+					"Only the following keys are valid: policy.yaml",
+				),
+			)
+		}
+	}
+	return errors
+}
+
+// SetDefaultRouteAnnotations sets HAProxy timeout values of the route
+func (spec *PlacementAPISpecCore) SetDefaultRouteAnnotations(annotations map[string]string) {
+	const haProxyAnno = "haproxy.router.openshift.io/timeout"
+	// Use a custom annotation to flag when the operator has set the default HAProxy timeout
+	// With the annotation func determines when to overwrite existing HAProxy timeout with the APITimeout
+	const placementAnno = "api.placement.openstack.org/timeout"
+	valPlacementAPI, okPlacementAPI := annotations[placementAnno]
+	valHAProxy, okHAProxy := annotations[haProxyAnno]
+	// Human operator set the HAProxy timeout manually
+	if !okPlacementAPI && okHAProxy {
+		return
+	}
+	// Human operator modified the HAProxy timeout manually without removing the Placement flag
+	if okPlacementAPI && okHAProxy && valPlacementAPI != valHAProxy {
+		delete(annotations, placementAnno)
+		placementapilog.Info("Human operator modified the HAProxy timeout manually without removing the Placement flag. Deleting the Placement flag to ensure proper configuration.")
+		return
+	}
+	timeout := fmt.Sprintf("%ds", spec.APITimeout)
+	annotations[placementAnno] = timeout
+	annotations[haProxyAnno] = timeout
+}
diff --git a/apis/placement/v1beta1/webhook_suite_test.go b/apis/placement/v1beta1/webhook_suite_test.go
new file mode 100644
index 000000000..16598793a
--- /dev/null
+++ b/apis/placement/v1beta1/webhook_suite_test.go
@@ -0,0 +1,146 @@
+/*
+Copyright 2022.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "path/filepath" + "testing" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + + admissionv1beta1 "k8s.io/api/admission/v1beta1" + kscheme "k8s.io/client-go/kubernetes/scheme" + + //+kubebuilder:scaffold:imports + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
+ +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment +var ctx context.Context +var cancel context.CancelFunc + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Webhook Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: false, + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "..", "config", "webhook")}, + }, + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + scheme := runtime.NewScheme() + err = AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + err = admissionv1beta1.AddToScheme(scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // start webhook server using Manager + webhookInstallOptions := &testEnv.WebhookInstallOptions + mgr, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: kscheme.Scheme, + // NOTE(gibi): disable metrics reporting in test to allow + // parallel test execution. 
Otherwise each instance would like to + // bind to the same port + Metrics: metricsserver.Options{ + BindAddress: "0", + }, + WebhookServer: webhook.NewServer( + webhook.Options{ + Host: webhookInstallOptions.LocalServingHost, + Port: webhookInstallOptions.LocalServingPort, + CertDir: webhookInstallOptions.LocalServingCertDir, + }), + LeaderElection: false, + }) + Expect(err).NotTo(HaveOccurred()) + + err = (&PlacementAPI{}).SetupWebhookWithManager(mgr) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:webhook + + go func() { + defer GinkgoRecover() + err = mgr.Start(ctx) + Expect(err).NotTo(HaveOccurred()) + }() + + // wait for the webhook server to get ready + dialer := &net.Dialer{Timeout: time.Second} + addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) + Eventually(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) + if err != nil { + return err + } + conn.Close() + return nil + }).Should(Succeed()) + +}) + +var _ = AfterSuite(func() { + cancel() + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/apis/placement/v1beta1/zz_generated.deepcopy.go b/apis/placement/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 000000000..09d35e507 --- /dev/null +++ b/apis/placement/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,257 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + topologyv1beta1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/service" + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIOverrideSpec) DeepCopyInto(out *APIOverrideSpec) { + *out = *in + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = make(map[service.Endpoint]service.RoutedOverrideSpec, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIOverrideSpec. +func (in *APIOverrideSpec) DeepCopy() *APIOverrideSpec { + if in == nil { + return nil + } + out := new(APIOverrideSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PasswordSelector) DeepCopyInto(out *PasswordSelector) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PasswordSelector. +func (in *PasswordSelector) DeepCopy() *PasswordSelector { + if in == nil { + return nil + } + out := new(PasswordSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementAPI) DeepCopyInto(out *PlacementAPI) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementAPI. +func (in *PlacementAPI) DeepCopy() *PlacementAPI { + if in == nil { + return nil + } + out := new(PlacementAPI) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlacementAPI) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementAPIDefaults) DeepCopyInto(out *PlacementAPIDefaults) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementAPIDefaults. +func (in *PlacementAPIDefaults) DeepCopy() *PlacementAPIDefaults { + if in == nil { + return nil + } + out := new(PlacementAPIDefaults) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementAPIList) DeepCopyInto(out *PlacementAPIList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PlacementAPI, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementAPIList. 
+func (in *PlacementAPIList) DeepCopy() *PlacementAPIList { + if in == nil { + return nil + } + out := new(PlacementAPIList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PlacementAPIList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PlacementAPISpec) DeepCopyInto(out *PlacementAPISpec) { + *out = *in + in.PlacementAPISpecCore.DeepCopyInto(&out.PlacementAPISpecCore) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementAPISpec. +func (in *PlacementAPISpec) DeepCopy() *PlacementAPISpec { + if in == nil { + return nil + } + out := new(PlacementAPISpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementAPISpecCore) DeepCopyInto(out *PlacementAPISpecCore) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + out.PasswordSelectors = in.PasswordSelectors + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(map[string]string) + if **in != nil { + in, out := *in, *out + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + } + if in.DefaultConfigOverwrite != nil { + in, out := &in.DefaultConfigOverwrite, &out.DefaultConfigOverwrite + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.NetworkAttachments != nil { + in, out := &in.NetworkAttachments, &out.NetworkAttachments + *out = make([]string, len(*in)) + copy(*out, *in) + } + in.Override.DeepCopyInto(&out.Override) + in.TLS.DeepCopyInto(&out.TLS) + if in.TopologyRef != nil { + in, out := &in.TopologyRef, &out.TopologyRef + *out = new(topologyv1beta1.TopoRef) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementAPISpecCore. +func (in *PlacementAPISpecCore) DeepCopy() *PlacementAPISpecCore { + if in == nil { + return nil + } + out := new(PlacementAPISpecCore) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PlacementAPIStatus) DeepCopyInto(out *PlacementAPIStatus) { + *out = *in + if in.Hash != nil { + in, out := &in.Hash, &out.Hash + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make(condition.Conditions, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NetworkAttachments != nil { + in, out := &in.NetworkAttachments, &out.NetworkAttachments + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + inVal := (*in)[key] + in, out := &inVal, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.LastAppliedTopology != nil { + in, out := &in.LastAppliedTopology, &out.LastAppliedTopology + *out = new(topologyv1beta1.TopoRef) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PlacementAPIStatus. 
+func (in *PlacementAPIStatus) DeepCopy() *PlacementAPIStatus { + if in == nil { + return nil + } + out := new(PlacementAPIStatus) + in.DeepCopyInto(out) + return out +} diff --git a/config/crd/bases/placement.openstack.org_placementapis.yaml b/config/crd/bases/placement.openstack.org_placementapis.yaml new file mode 100644 index 000000000..a9f2c0281 --- /dev/null +++ b/config/crd/bases/placement.openstack.org_placementapis.yaml @@ -0,0 +1,490 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.14.0 + name: placementapis.placement.openstack.org +spec: + group: placement.openstack.org + names: + kind: PlacementAPI + listKind: PlacementAPIList + plural: placementapis + singular: placementapi + scope: Namespaced + versions: + - additionalPrinterColumns: + - description: NetworkAttachments + jsonPath: .spec.networkAttachments + name: NetworkAttachments + type: string + - description: Status + jsonPath: .status.conditions[0].status + name: Status + type: string + - description: Message + jsonPath: .status.conditions[0].message + name: Message + type: string + name: v1beta1 + schema: + openAPIV3Schema: + description: PlacementAPI is the Schema for the placementapis API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. 
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: PlacementAPISpec defines the desired state of PlacementAPI + properties: + apiTimeout: + default: 60 + description: APITimeout for HAProxy, Apache + minimum: 10 + type: integer + containerImage: + description: PlacementAPI Container Image URL (will be set to environmental + default if empty) + type: string + customServiceConfig: + description: |- + CustomServiceConfig - customize the service config using this parameter to change service defaults, + or overwrite rendered information using raw OpenStack config format. The content gets added to + to /etc//.conf.d directory as custom.conf file. + type: string + databaseAccount: + default: placement + description: DatabaseAccount - name of MariaDBAccount which will be + used to connect. + type: string + databaseInstance: + description: |- + MariaDB instance name + Right now required by the maridb-operator to get the credentials from the instance to create the DB + Might not be required in future + type: string + defaultConfigOverwrite: + additionalProperties: + type: string + description: DefaultConfigOverwrite - interface to overwrite default + config files like policy.yaml. + type: object + networkAttachments: + description: NetworkAttachments is a list of NetworkAttachment resource + names to expose the services to the given network + items: + type: string + type: array + nodeSelector: + additionalProperties: + type: string + description: NodeSelector to target subset of worker nodes running + this service + type: object + override: + description: Override, provides the ability to override the generated + manifest of several child resources. + properties: + service: + additionalProperties: + description: |- + RoutedOverrideSpec - a routed service override configuration for the Service created to serve traffic + to the cluster. 
Allows for the manifest of the created Service to be overwritten with custom configuration. + properties: + endpointURL: + type: string + metadata: + description: |- + EmbeddedLabelsAnnotations is an embedded subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta. + Only labels and annotations are included. + properties: + annotations: + additionalProperties: + type: string + description: |- + Annotations is an unstructured key value map stored with a resource that may be + set by external tools to store and retrieve arbitrary metadata. They are not + queryable and should be preserved when modifying objects. + More info: http://kubernetes.io/docs/user-guide/annotations + type: object + labels: + additionalProperties: + type: string + description: |- + Map of string keys and values that can be used to organize and categorize + (scope and select) objects. May match selectors of replication controllers + and services. + More info: http://kubernetes.io/docs/user-guide/labels + type: object + type: object + spec: + description: |- + OverrideServiceSpec is a subset of the fields included in https://pkg.go.dev/k8s.io/api@v0.26.6/core/v1#ServiceSpec + Limited to Type, SessionAffinity, LoadBalancerSourceRanges, ExternalName, ExternalTrafficPolicy, SessionAffinityConfig, + IPFamilyPolicy, LoadBalancerClass and InternalTrafficPolicy + properties: + externalName: + description: |- + externalName is the external reference that discovery mechanisms will + return as an alias for this service (e.g. a DNS CNAME record). No + proxying will be involved. Must be a lowercase RFC-1123 hostname + (https://tools.ietf.org/html/rfc1123) and requires `type` to be "ExternalName". + type: string + externalTrafficPolicy: + description: |- + externalTrafficPolicy describes how nodes distribute service traffic they + receive on one of the Service's "externally-facing" addresses (NodePorts, + ExternalIPs, and LoadBalancer IPs). 
If set to "Local", the proxy will configure + the service in a way that assumes that external load balancers will take care + of balancing the service traffic between nodes, and so each node will deliver + traffic only to the node-local endpoints of the service, without masquerading + the client source IP. (Traffic mistakenly sent to a node with no endpoints will + be dropped.) The default value, "Cluster", uses the standard behavior of + routing to all endpoints evenly (possibly modified by topology and other + features). Note that traffic sent to an External IP or LoadBalancer IP from + within the cluster will always get "Cluster" semantics, but clients sending to + a NodePort from within the cluster may need to take traffic policy into account + when picking a node. + type: string + internalTrafficPolicy: + description: |- + InternalTrafficPolicy describes how nodes distribute service traffic they + receive on the ClusterIP. If set to "Local", the proxy will assume that pods + only want to talk to endpoints of the service on the same node as the pod, + dropping the traffic if there are no local endpoints. The default value, + "Cluster", uses the standard behavior of routing to all endpoints evenly + (possibly modified by topology and other features). + type: string + ipFamilyPolicy: + description: |- + IPFamilyPolicy represents the dual-stack-ness requested or required by + this Service. If there is no value provided, then this field will be set + to SingleStack. Services can be "SingleStack" (a single IP family), + "PreferDualStack" (two IP families on dual-stack configured clusters or + a single IP family on single-stack clusters), or "RequireDualStack" + (two IP families on dual-stack configured clusters, otherwise fail). The + ipFamilies and clusterIPs fields depend on the value of this field. This + field will be wiped when updating a service to type ExternalName. 
+ type: string + loadBalancerClass: + description: |- + loadBalancerClass is the class of the load balancer implementation this Service belongs to. + If specified, the value of this field must be a label-style identifier, with an optional prefix, + e.g. "internal-vip" or "example.com/internal-vip". Unprefixed names are reserved for end-users. + This field can only be set when the Service type is 'LoadBalancer'. If not set, the default load + balancer implementation is used, today this is typically done through the cloud provider integration, + but should apply for any default implementation. If set, it is assumed that a load balancer + implementation is watching for Services with a matching class. Any default load balancer + implementation (e.g. cloud providers) should ignore Services that set this field. + This field can only be set when creating or updating a Service to type 'LoadBalancer'. + Once set, it can not be changed. This field will be wiped when a service is updated to a non 'LoadBalancer' type. + type: string + loadBalancerSourceRanges: + description: |- + If specified and supported by the platform, this will restrict traffic through the cloud-provider + load-balancer will be restricted to the specified client IPs. This field will be ignored if the + cloud-provider does not support the feature." + More info: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/ + items: + type: string + type: array + x-kubernetes-list-type: atomic + sessionAffinity: + description: |- + Supports "ClientIP" and "None". Used to maintain session affinity. + Enable client IP based session affinity. + Must be ClientIP or None. + Defaults to None. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + type: string + sessionAffinityConfig: + description: sessionAffinityConfig contains the configurations + of session affinity. 
+ properties: + clientIP: + description: clientIP contains the configurations + of Client IP based session affinity. + properties: + timeoutSeconds: + description: |- + timeoutSeconds specifies the seconds of ClientIP type session sticky time. + The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + Default value is 10800(for 3 hours). + format: int32 + type: integer + type: object + type: object + type: + description: |- + type determines how the Service is exposed. Defaults to ClusterIP. Valid + options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + "ClusterIP" allocates a cluster-internal IP address for load-balancing + to endpoints. Endpoints are determined by the selector or if that is not + specified, by manual construction of an Endpoints object or + EndpointSlice objects. If clusterIP is "None", no virtual IP is + allocated and the endpoints are published as a set of endpoints rather + than a virtual IP. + "NodePort" builds on ClusterIP and allocates a port on every node which + routes to the same endpoints as the clusterIP. + "LoadBalancer" builds on NodePort and creates an external load-balancer + (if supported in the current cloud) which routes to the same endpoints + as the clusterIP. + "ExternalName" aliases this service to the specified externalName. + Several other fields do not apply to ExternalName services. + More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: string + type: object + type: object + description: |- + Override configuration for the Service created to serve traffic to the cluster. 
+ The key must be the endpoint type (public, internal) + type: object + type: object + passwordSelectors: + default: + service: PlacementPassword + description: PasswordSelectors - Selectors to identify the DB and + ServiceUser password from the Secret + properties: + service: + default: PlacementPassword + description: Service - Selector to get the service user password + from the Secret + type: string + type: object + preserveJobs: + default: false + description: PreserveJobs - do not delete jobs after they finished + e.g. to check logs + type: boolean + replicas: + default: 1 + description: Replicas of placement API to run + format: int32 + maximum: 32 + minimum: 0 + type: integer + resources: + description: |- + Resources - Compute Resources required by this service (Limits/Requests). + https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + properties: + claims: + description: |- + Claims lists the names of resources, defined in spec.resourceClaims, + that are used by this container. + + + This is an alpha field and requires enabling the + DynamicResourceAllocation feature gate. + + + This field is immutable. It can only be set for containers. + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: |- + Name must match the name of one entry in pod.spec.resourceClaims of + the Pod where this field is used. It makes that resource available + inside a container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Limits describes the maximum amount of compute resources allowed. 
+ More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: |- + Requests describes the minimum amount of compute resources required. + If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + otherwise to an implementation-defined value. Requests cannot exceed Limits. + More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + type: object + type: object + secret: + description: Secret containing OpenStack password information for + placement PlacementPassword + type: string + serviceUser: + default: placement + description: ServiceUser - optional username used for this service + to register in keystone + type: string + tls: + description: TLS - Parameters related to the TLS + properties: + api: + description: API tls type which encapsulates for API services + properties: + internal: + description: Internal GenericService - holds the secret for + the internal endpoint + properties: + secretName: + description: SecretName - holding the cert, key for the + service + type: string + type: object + public: + description: Public GenericService - holds the secret for + the public endpoint + properties: + secretName: + description: SecretName - holding the cert, key for the + service + type: string + type: object + type: object + caBundleSecretName: + description: CaBundleSecretName - holding the CA certs in a pre-created + bundle file + type: string + type: object + topologyRef: + description: |- + TopologyRef to apply the Topology defined by the associated CR referenced + by name + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + 
description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + required: + - containerImage + - databaseInstance + - secret + type: object + status: + description: PlacementAPIStatus defines the observed state of PlacementAPI + properties: + conditions: + description: Conditions + items: + description: Condition defines an observation of a API resource + operational state. + properties: + lastTransitionTime: + description: |- + Last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when + the API field changed is acceptable. + format: date-time + type: string + message: + description: A human readable message indicating details about + the transition. + type: string + reason: + description: The reason for the condition's last transition + in CamelCase. + type: string + severity: + description: |- + Severity provides a classification of Reason code, so the current situation is immediately + understandable and could act accordingly. + It is meant for situations where Status=False and it should be indicated if it is just + informational, warning (next reconciliation might fix it) or an error (e.g. DB create issue + and no actions to automatically resolve the issue can/should be done). + For conditions where Status=Unknown or Status=True the Severity should be SeverityNone. + type: string + status: + description: Status of the condition, one of True, False, Unknown. + type: string + type: + description: Type of condition in CamelCase. 
+ type: string + required: + - lastTransitionTime + - status + - type + type: object + type: array + databaseHostname: + description: Placement Database Hostname + type: string + hash: + additionalProperties: + type: string + description: Map of hashes to track e.g. job status + type: object + lastAppliedTopology: + description: LastAppliedTopology - the last applied Topology + properties: + name: + description: Name - The Topology CR name that the Service references + type: string + namespace: + description: |- + Namespace - The Namespace to fetch the Topology CR referenced + NOTE: Namespace currently points by default to the same namespace where + the Service is deployed. Customizing the namespace is not supported and + webhooks prevent editing this field to a value different from the + current project + type: string + type: object + networkAttachments: + additionalProperties: + items: + type: string + type: array + description: NetworkAttachments status of the deployment pods + type: object + observedGeneration: + description: ObservedGeneration - the most recent generation observed + for this service. If the observed generation is less than the spec + generation, then the controller has not processed the latest changes. 
+ format: int64 + type: integer + readyCount: + description: ReadyCount of placement API instances + format: int32 + type: integer + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index fac92d16a..f27e0deba 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -10,6 +10,7 @@ resources: - bases/nova.openstack.org_novacells.yaml - bases/nova.openstack.org_nova.yaml - bases/nova.openstack.org_novacomputes.yaml +- bases/placement.openstack.org_placementapis.yaml #+kubebuilder:scaffold:crdkustomizeresource patches: @@ -22,6 +23,7 @@ patches: #- path: patches/webhook_in_nova_novanovncproxies.yaml #- path: patches/webhook_in_nova_novacells.yaml #- path: patches/webhook_in_nova_nova.yaml +#- path: patches/webhook_in_placement_placementapis.yaml #+kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. @@ -33,6 +35,7 @@ patches: #- path: patches/cainjection_in_nova_novanovncproxies.yaml #- path: patches/cainjection_in_nova_novacells.yaml #- path: patches/cainjection_in_nova_nova.yaml +#- path: patches/cainjection_in_placement_placementapis.yaml #+kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
diff --git a/config/crd/patches/cainjection_in_placement_placementapis.yaml b/config/crd/patches/cainjection_in_placement_placementapis.yaml new file mode 100644 index 000000000..afa8817bb --- /dev/null +++ b/config/crd/patches/cainjection_in_placement_placementapis.yaml @@ -0,0 +1,7 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: placementapis.placement.openstack.org diff --git a/config/crd/patches/webhook_in_placement_placementapis.yaml b/config/crd/patches/webhook_in_placement_placementapis.yaml new file mode 100644 index 000000000..24229da3c --- /dev/null +++ b/config/crd/patches/webhook_in_placement_placementapis.yaml @@ -0,0 +1,16 @@ +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: placementapis.placement.openstack.org +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/default/manager_default_images.yaml b/config/default/manager_default_images.yaml index 1c98fc6c2..c78a3eacf 100644 --- a/config/default/manager_default_images.yaml +++ b/config/default/manager_default_images.yaml @@ -21,3 +21,5 @@ spec: value: quay.io/podified-antelope-centos9/openstack-nova-scheduler:current-podified - name: RELATED_IMAGE_NOVA_COMPUTE_IMAGE_URL_DEFAULT value: quay.io/podified-antelope-centos9/openstack-nova-compute:current-podified + - name: RELATED_IMAGE_PLACEMENT_API_IMAGE_URL_DEFAULT + value: quay.io/podified-antelope-centos9/openstack-placement-api:current-podified diff --git a/config/manifests/bases/nova-operator.clusterserviceversion.yaml b/config/manifests/bases/nova-operator.clusterserviceversion.yaml index 
49d5abebb..b085674b2 100644 --- a/config/manifests/bases/nova-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/nova-operator.clusterserviceversion.yaml @@ -136,6 +136,15 @@ spec: displayName: TLS path: tls version: v1beta1 + - description: PlacementAPI is the Schema for the placementapis API + displayName: Placement API + kind: PlacementAPI + name: placementapis.placement.openstack.org + specDescriptors: + - description: TLS - Parameters related to the TLS + displayName: TLS + path: tls + version: v1beta1 description: Nova Operator displayName: Nova Operator install: diff --git a/config/rbac/placement_placementapi_editor_role.yaml b/config/rbac/placement_placementapi_editor_role.yaml new file mode 100644 index 000000000..0d0f01284 --- /dev/null +++ b/config/rbac/placement_placementapi_editor_role.yaml @@ -0,0 +1,31 @@ +# permissions for end users to edit placementapis. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: placementapi-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: nova-operator + app.kubernetes.io/part-of: nova-operator + app.kubernetes.io/managed-by: kustomize + name: placementapi-editor-role +rules: +- apiGroups: + - placement.openstack.org + resources: + - placementapis + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - placement.openstack.org + resources: + - placementapis/status + verbs: + - get diff --git a/config/rbac/placement_placementapi_viewer_role.yaml b/config/rbac/placement_placementapi_viewer_role.yaml new file mode 100644 index 000000000..5b36b1a1c --- /dev/null +++ b/config/rbac/placement_placementapi_viewer_role.yaml @@ -0,0 +1,27 @@ +# permissions for end users to view placementapis. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: placementapi-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: nova-operator + app.kubernetes.io/part-of: nova-operator + app.kubernetes.io/managed-by: kustomize + name: placementapi-viewer-role +rules: +- apiGroups: + - placement.openstack.org + resources: + - placementapis + verbs: + - get + - list + - watch +- apiGroups: + - placement.openstack.org + resources: + - placementapis/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index b23be6242..e7ffd1d36 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -27,6 +27,18 @@ rules: - patch - update - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - apps resources: @@ -63,6 +75,18 @@ rules: - patch - update - watch +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - "" resources: @@ -405,6 +429,33 @@ rules: - get - patch - update +- apiGroups: + - placement.openstack.org + resources: + - placementapis + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - placement.openstack.org + resources: + - placementapis/finalizers + verbs: + - patch + - update +- apiGroups: + - placement.openstack.org + resources: + - placementapis/status + verbs: + - get + - patch + - update - apiGroups: - rabbitmq.openstack.org resources: diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index 84a35fd9b..af7fabef2 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -8,4 +8,5 @@ resources: - nova_v1beta1_novacell1-upcall.yaml - nova_v1beta1_nova.yaml - nova_v1beta1_novacompute-ironic.yaml +- 
placement_v1beta1_placementapi.yaml #+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/placement_v1beta1_placementapi.yaml b/config/samples/placement_v1beta1_placementapi.yaml new file mode 100644 index 000000000..5d1607fa8 --- /dev/null +++ b/config/samples/placement_v1beta1_placementapi.yaml @@ -0,0 +1,12 @@ +apiVersion: placement.openstack.org/v1beta1 +kind: PlacementAPI +metadata: + labels: + app.kubernetes.io/name: placementapi + app.kubernetes.io/instance: placementapi-sample + app.kubernetes.io/part-of: nova-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: nova-operator + name: placementapi-sample +spec: + # TODO(user): Add fields here diff --git a/config/webhook/manifests.yaml b/config/webhook/manifests.yaml index ac5f1acc1..e4092e02f 100644 --- a/config/webhook/manifests.yaml +++ b/config/webhook/manifests.yaml @@ -164,6 +164,26 @@ webhooks: resources: - novaschedulers sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /mutate-placement-openstack-org-v1beta1-placementapi + failurePolicy: Fail + name: mplacementapi.kb.io + rules: + - apiGroups: + - placement.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - placementapis + sideEffects: None --- apiVersion: admissionregistration.k8s.io/v1 kind: ValidatingWebhookConfiguration @@ -330,3 +350,23 @@ webhooks: resources: - novaschedulers sideEffects: None +- admissionReviewVersions: + - v1 + clientConfig: + service: + name: webhook-service + namespace: system + path: /validate-placement-openstack-org-v1beta1-placementapi + failurePolicy: Fail + name: vplacementapi.kb.io + rules: + - apiGroups: + - placement.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - placementapis + sideEffects: None diff --git a/controllers/placement/placementapi_controller.go 
b/controllers/placement/placementapi_controller.go new file mode 100644 index 000000000..36927befc --- /dev/null +++ b/controllers/placement/placementapi_controller.go @@ -0,0 +1,1439 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "context" + "fmt" + "time" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/predicate" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + "github.com/go-logr/logr" + networkv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + keystonev1 "github.com/openstack-k8s-operators/keystone-operator/api/v1beta1" + common "github.com/openstack-k8s-operators/lib-common/modules/common" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + deployment "github.com/openstack-k8s-operators/lib-common/modules/common/deployment" + endpoint "github.com/openstack-k8s-operators/lib-common/modules/common/endpoint" + env 
"github.com/openstack-k8s-operators/lib-common/modules/common/env" + helper "github.com/openstack-k8s-operators/lib-common/modules/common/helper" + job "github.com/openstack-k8s-operators/lib-common/modules/common/job" + labels "github.com/openstack-k8s-operators/lib-common/modules/common/labels" + nad "github.com/openstack-k8s-operators/lib-common/modules/common/networkattachment" + common_rbac "github.com/openstack-k8s-operators/lib-common/modules/common/rbac" + "github.com/openstack-k8s-operators/lib-common/modules/common/secret" + "github.com/openstack-k8s-operators/lib-common/modules/common/service" + "github.com/openstack-k8s-operators/lib-common/modules/common/tls" + util "github.com/openstack-k8s-operators/lib-common/modules/common/util" + + mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" + + placementv1 "github.com/openstack-k8s-operators/nova-operator/apis/placement/v1beta1" + placement "github.com/openstack-k8s-operators/nova-operator/pkg/placement" + + appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" +) + +type conditionUpdater interface { + Set(c *condition.Condition) + MarkTrue(t condition.Type, messageFormat string, messageArgs ...interface{}) +} + +type GetSecret interface { + GetSecret() string + client.Object +} + +// ensureSecret - ensures that the Secret object exists and the expected fields +// are in the Secret. It returns a hash of the values of the expected fields. 
func ensureSecret(
	ctx context.Context,
	secretName types.NamespacedName,
	expectedFields []string,
	reader client.Reader,
	conditionUpdater conditionUpdater,
) (string, ctrl.Result, corev1.Secret, error) {
	secret := &corev1.Secret{}
	err := reader.Get(ctx, secretName, secret)
	if err != nil {
		if k8s_errors.IsNotFound(err) {
			// Missing input is reported as Requested/Info (waiting for the
			// user to create the Secret), not as an error condition.
			conditionUpdater.Set(condition.FalseCondition(
				condition.InputReadyCondition,
				condition.RequestedReason,
				condition.SeverityInfo,
				fmt.Sprintf("Input data resources missing: %s", "secret/"+secretName.Name)))
			return "",
				ctrl.Result{},
				*secret,
				fmt.Errorf("%w: Secret %s not found", err, secretName)
		}
		conditionUpdater.Set(condition.FalseCondition(
			condition.InputReadyCondition,
			condition.ErrorReason,
			condition.SeverityWarning,
			condition.InputReadyErrorMessage,
			err.Error()))
		return "", ctrl.Result{}, *secret, err
	}

	// collect the secret values the caller expects to exist
	values := [][]byte{}
	for _, field := range expectedFields {
		val, ok := secret.Data[field]
		if !ok {
			// A present Secret with a missing field is a configuration error,
			// so this is reported with Error/Warning severity unlike the
			// not-found case above.
			err := fmt.Errorf("%w: field '%s' not found in secret/%s", util.ErrFieldNotFound, field, secretName.Name)
			conditionUpdater.Set(condition.FalseCondition(
				condition.InputReadyCondition,
				condition.ErrorReason,
				condition.SeverityWarning,
				condition.InputReadyErrorMessage,
				err.Error()))
			return "", ctrl.Result{}, *secret, err
		}
		values = append(values, val)
	}

	// TODO(gibi): Do we need to watch the Secret for changes?

	// Hash only the expected fields (in expectedFields order) so that
	// unrelated keys in the Secret do not trigger reconfiguration.
	hash, err := util.ObjectHash(values)
	if err != nil {
		conditionUpdater.Set(condition.FalseCondition(
			condition.InputReadyCondition,
			condition.ErrorReason,
			condition.SeverityWarning,
			condition.InputReadyErrorMessage,
			err.Error()))
		return "", ctrl.Result{}, *secret, err
	}

	return hash, ctrl.Result{}, *secret, nil
}

// GetLogger returns a logger object with a prefix of "controller.name" and additional controller context fields
func (r *PlacementAPIReconciler) GetLogger(ctx context.Context) logr.Logger {
	return log.FromContext(ctx).WithName("Controllers").WithName("PlacementAPI")
}

// PlacementAPIReconciler reconciles a PlacementAPI object
type PlacementAPIReconciler struct {
	client.Client
	// Kclient is the raw clientset used where the controller-runtime
	// client is not sufficient (passed down to lib-common helpers).
	Kclient kubernetes.Interface
	Scheme  *runtime.Scheme
}

// +kubebuilder:rbac:groups=placement.openstack.org,resources=placementapis,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=placement.openstack.org,resources=placementapis/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=placement.openstack.org,resources=placementapis/finalizers,verbs=update;patch
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update;patch;delete;
// +kubebuilder:rbac:groups=core,resources=configmaps,verbs=get;list;watch;create;update;patch;delete;
// +kubebuilder:rbac:groups=core,resources=services,verbs=get;list;watch;create;update;patch;delete;
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;
// +kubebuilder:rbac:groups=batch,resources=jobs,verbs=get;list;watch;create;update;patch;delete;
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;create;update;patch;delete;
// +kubebuilder:rbac:groups=mariadb.openstack.org,resources=mariadbdatabases,verbs=get;list;watch;create;update;patch;delete;
// +kubebuilder:rbac:groups=mariadb.openstack.org,resources=mariadbdatabases/finalizers,verbs=update;patch
// +kubebuilder:rbac:groups=mariadb.openstack.org,resources=mariadbaccounts,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=mariadb.openstack.org,resources=mariadbaccounts/finalizers,verbs=update;patch
// +kubebuilder:rbac:groups=keystone.openstack.org,resources=keystoneapis,verbs=get;list;watch;
// +kubebuilder:rbac:groups=keystone.openstack.org,resources=keystoneservices,verbs=get;list;watch;create;update;patch;delete;
// +kubebuilder:rbac:groups=keystone.openstack.org,resources=keystoneendpoints,verbs=get;list;watch;create;update;patch;delete;
// +kubebuilder:rbac:groups=k8s.cni.cncf.io,resources=network-attachment-definitions,verbs=get;list;watch

// service account, role, rolebinding
// +kubebuilder:rbac:groups="",resources=serviceaccounts,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=roles,verbs=get;list;watch;create;update;patch
// +kubebuilder:rbac:groups="rbac.authorization.k8s.io",resources=rolebindings,verbs=get;list;watch;create;update;patch
// service account permissions that are needed to grant permission to the above
// +kubebuilder:rbac:groups="security.openshift.io",resourceNames=anyuid,resources=securitycontextconstraints,verbs=use
// +kubebuilder:rbac:groups="",resources=pods,verbs=create;delete;get;list;patch;update;watch
// +kubebuilder:rbac:groups=topology.openstack.org,resources=topologies,verbs=get;list;watch;update

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
// TODO(user): Modify the Reconcile function to compare the state specified by
// the PlacementAPI object against the actual cluster state, and then
// perform operations to make the cluster state reflect the state specified by
// the user.
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *PlacementAPIReconciler) Reconcile(ctx context.Context, req ctrl.Request) (result ctrl.Result, _err error) { + Log := r.GetLogger(ctx) + + // Fetch the PlacementAPI instance + instance := &placementv1.PlacementAPI{} + err := r.Client.Get(ctx, req.NamespacedName, instance) + if err != nil { + if k8s_errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Owned objects are automatically garbage collected. + // For additional cleanup logic use finalizers. Return and don't requeue. + Log.Info("Placement instance not found, probably deleted before reconciled. Nothing to do.") + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. + Log.Error(err, "Failed to read the Placement instance.") + return ctrl.Result{}, err + } + + h, err := helper.NewHelper( + instance, + r.Client, + r.Kclient, + r.Scheme, + Log, + ) + if err != nil { + Log.Error(err, "Failed to create lib-common Helper") + return ctrl.Result{}, err + } + + // Save a copy of the condtions so that we can restore the LastTransitionTime + // when a condition's state doesn't change. + savedConditions := instance.Status.Conditions.DeepCopy() + // initialize status fields + if err = r.initStatus(instance); err != nil { + return ctrl.Result{}, err + } + instance.Status.ObservedGeneration = instance.Generation + + // Always patch the instance status when exiting this function so we can persist any changes. 
+ defer func() { + // Don't update the status, if reconciler Panics + if r := recover(); r != nil { + Log.Info(fmt.Sprintf("panic during reconcile %v\n", r)) + panic(r) + } + // update the Ready condition based on the sub conditions + if instance.Status.Conditions.AllSubConditionIsTrue() { + instance.Status.Conditions.MarkTrue( + condition.ReadyCondition, condition.ReadyMessage) + } else { + // something is not ready so reset the Ready condition + instance.Status.Conditions.MarkUnknown( + condition.ReadyCondition, condition.InitReason, condition.ReadyInitMessage) + // and recalculate it based on the state of the rest of the conditions + instance.Status.Conditions.Set( + instance.Status.Conditions.Mirror(condition.ReadyCondition)) + } + condition.RestoreLastTransitionTimes(&instance.Status.Conditions, savedConditions) + err := h.PatchInstance(ctx, instance) + if err != nil { + _err = err + return + } + }() + + // Handle service delete + if !instance.DeletionTimestamp.IsZero() { + return r.reconcileDelete(ctx, instance, h) + } + + // We create a KeystoneService CR later and that will automatically get the + // Nova finalizer. So we need a finalizer on the ourselves too so that + // during Nova CR delete we can have a chance to remove the finalizer from + // the our KeystoneService so that is also deleted. + updated := controllerutil.AddFinalizer(instance, h.GetFinalizer()) + if updated { + Log.Info("Added finalizer to ourselves") + // we intentionally return immediately to force the deferred function + // to persist the Instance with the finalizer. We need to have our own + // finalizer persisted before we try to create the KeystoneService with + // our finalizer to avoid orphaning the KeystoneService. 
+ return ctrl.Result{}, nil + } + // Service account, role, binding + rbacRules := []rbacv1.PolicyRule{ + { + APIGroups: []string{"security.openshift.io"}, + ResourceNames: []string{"anyuid"}, + Resources: []string{"securitycontextconstraints"}, + Verbs: []string{"use"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"pods"}, + Verbs: []string{"create", "get", "list", "watch", "update", "patch", "delete"}, + }, + } + rbacResult, err := common_rbac.ReconcileRbac(ctx, h, instance, rbacRules) + if err != nil { + return rbacResult, err + } else if (rbacResult != ctrl.Result{}) { + return rbacResult, nil + } + // ConfigMap + configMapVars := make(map[string]env.Setter) + + // + // check for required OpenStack secret holding passwords for service/admin user and add hash to the vars map + // + hash, result, secret, err := ensureSecret( + ctx, + types.NamespacedName{Namespace: instance.Namespace, Name: instance.Spec.Secret}, + []string{ + instance.Spec.PasswordSelectors.Service, + }, + h.GetClient(), + &instance.Status.Conditions) + if err != nil { + if k8s_errors.IsNotFound(err) { + Log.Info(fmt.Sprintf("OpenStack secret %s not found", instance.Spec.Secret)) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.InputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.InputReadyWaitingMessage)) + return ctrl.Result{RequeueAfter: time.Second * 10}, nil + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.InputReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.InputReadyErrorMessage, + err.Error())) + return result, err + } + configMapVars[instance.Spec.Secret] = env.SetValue(hash) + + // all our input checks out so report InputReady + instance.Status.Conditions.MarkTrue(condition.InputReadyCondition, condition.InputReadyMessage) + + // ensure MariaDBAccount exists. 
This account record may be created by + // openstack-operator or the cloud operator up front without a specific + // MariaDBDatabase configured yet. Otherwise, a MariaDBAccount CR is + // created here with a generated username as well as a secret with + // generated password. The MariaDBAccount is created without being + // yet associated with any MariaDBDatabase. + _, _, err = mariadbv1.EnsureMariaDBAccount( + ctx, h, instance.Spec.DatabaseAccount, + instance.Namespace, false, placement.DatabaseName, + ) + + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + mariadbv1.MariaDBAccountReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + mariadbv1.MariaDBAccountNotReadyMessage, + err.Error())) + + return ctrl.Result{}, err + } + instance.Status.Conditions.MarkTrue( + mariadbv1.MariaDBAccountReadyCondition, + mariadbv1.MariaDBAccountReadyMessage, + ) + + db, result, err := r.ensureDB(ctx, h, instance) + if err != nil { + return ctrl.Result{}, err + } else if (result != ctrl.Result{}) { + return result, nil + } + + err = r.generateServiceConfigMaps(ctx, h, instance, secret, &configMapVars, db) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.ServiceConfigReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.ServiceConfigReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + // TLS input validation + // + // Validate the CA cert secret if provided + if instance.Spec.TLS.CaBundleSecretName != "" { + hash, err := tls.ValidateCACertSecret( + ctx, + h.GetClient(), + types.NamespacedName{ + Name: instance.Spec.TLS.CaBundleSecretName, + Namespace: instance.Namespace, + }, + ) + if err != nil { + if k8s_errors.IsNotFound(err) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TLSInputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + fmt.Sprintf(condition.TLSInputReadyWaitingMessage, 
instance.Spec.TLS.CaBundleSecretName))) + return ctrl.Result{}, nil + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TLSInputReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.TLSInputErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + + if hash != "" { + configMapVars[tls.CABundleKey] = env.SetValue(hash) + } + } + + // Validate API service certs secrets + certsHash, err := instance.Spec.TLS.API.ValidateCertSecrets(ctx, h, instance.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TLSInputReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + fmt.Sprintf(condition.TLSInputReadyWaitingMessage, err.Error()))) + return ctrl.Result{}, nil + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TLSInputReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.TLSInputErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + configMapVars[tls.TLSHashName] = env.SetValue(certsHash) + + instance.Status.Conditions.MarkTrue(condition.TLSInputReadyCondition, condition.InputReadyMessage) + + // create hash over all the different input resources to identify if any those changed + // and a restart/recreate is required. 
+ // + inputHash, hashChanged, err := r.createHashOfInputHashes(ctx, instance, configMapVars) + if err != nil { + return ctrl.Result{}, err + } else if hashChanged { + // Hash changed and instance status should be updated (which will be done by main defer func), + // so we need to return and reconcile again + return ctrl.Result{}, nil + } + + instance.Status.Conditions.MarkTrue(condition.ServiceConfigReadyCondition, condition.ServiceConfigReadyMessage) + + serviceAnnotations, result, err := r.ensureNetworkAttachments(ctx, h, instance) + if (err != nil || result != ctrl.Result{}) { + return result, err + } + + apiEndpoints, result, err := r.ensureServiceExposed(ctx, h, instance) + + if (err != nil || result != ctrl.Result{}) { + // We can ignore RequeueAfter as we are watching the Service resource + // but we have to return while waiting for the service to be exposed + return ctrl.Result{}, err + } + + result, err = r.ensureDbSync(ctx, instance, h, serviceAnnotations) + if (err != nil || result != ctrl.Result{}) { + return result, err + } + + result, err = r.ensureDeployment(ctx, h, instance, inputHash, serviceAnnotations) + if (err != nil || result != ctrl.Result{}) { + return result, err + } + + // Only expose the service is the deployment succeeded + if !instance.Status.Conditions.IsTrue(condition.DeploymentReadyCondition) { + Log.Info("Waiting for the Deployment to become Ready before exposing the sevice in Keystone") + return ctrl.Result{}, nil + } + err = r.ensureKeystoneServiceUser(ctx, h, instance) + if err != nil { + return ctrl.Result{}, err + } + + result, err = r.ensureKeystoneEndpoint(ctx, h, instance, apiEndpoints) + if (err != nil || result != ctrl.Result{}) { + // We can ignore RequeueAfter as we are watching the KeystoneEndpoint resource + return ctrl.Result{}, err + } + + // remove finalizers from unused MariaDBAccount records + err = mariadbv1.DeleteUnusedMariaDBAccountFinalizers(ctx, h, placement.DatabaseName, instance.Spec.DatabaseAccount, 
instance.Namespace) + if err != nil { + return ctrl.Result{}, err + } + + return ctrl.Result{}, nil +} + +func getServiceLabels(instance *placementv1.PlacementAPI) map[string]string { + return map[string]string{ + common.AppSelector: placement.ServiceName, + common.OwnerSelector: instance.Name, + } +} + +func (r *PlacementAPIReconciler) ensureServiceExposed( + ctx context.Context, + h *helper.Helper, + instance *placementv1.PlacementAPI, +) (map[string]string, ctrl.Result, error) { + placementEndpoints := map[service.Endpoint]endpoint.Data{ + service.EndpointPublic: {Port: placement.PlacementPublicPort}, + service.EndpointInternal: {Port: placement.PlacementInternalPort}, + } + apiEndpoints := make(map[string]string) + + serviceLabels := getServiceLabels(instance) + for endpointType, data := range placementEndpoints { + endpointTypeStr := string(endpointType) + endpointName := placement.ServiceName + "-" + endpointTypeStr + + svcOverride := instance.Spec.Override.Service[endpointType] + if svcOverride.EmbeddedLabelsAnnotations == nil { + svcOverride.EmbeddedLabelsAnnotations = &service.EmbeddedLabelsAnnotations{} + } + + exportLabels := util.MergeStringMaps( + serviceLabels, + map[string]string{ + service.AnnotationEndpointKey: endpointTypeStr, + }, + ) + + // Create the service + svc, err := service.NewService( + service.GenericService(&service.GenericServiceDetails{ + Name: endpointName, + Namespace: instance.Namespace, + Labels: exportLabels, + Selector: serviceLabels, + Port: service.GenericServicePort{ + Name: endpointName, + Port: data.Port, + Protocol: corev1.ProtocolTCP, + }, + }), + 5, + &svcOverride.OverrideSpec, + ) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.CreateServiceReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.CreateServiceReadyErrorMessage, + err.Error())) + + return apiEndpoints, ctrl.Result{}, err + } + + svc.AddAnnotation(map[string]string{ + 
service.AnnotationEndpointKey: endpointTypeStr, + }) + + // add Annotation to whether creating an ingress is required or not + if endpointType == service.EndpointPublic && svc.GetServiceType() == corev1.ServiceTypeClusterIP { + svc.AddAnnotation(map[string]string{ + service.AnnotationIngressCreateKey: "true", + }) + } else { + svc.AddAnnotation(map[string]string{ + service.AnnotationIngressCreateKey: "false", + }) + if svc.GetServiceType() == corev1.ServiceTypeLoadBalancer { + svc.AddAnnotation(map[string]string{ + service.AnnotationHostnameKey: svc.GetServiceHostname(), // add annotation to register service name in dnsmasq + }) + } + } + + ctrlResult, err := svc.CreateOrPatch(ctx, h) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.CreateServiceReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.CreateServiceReadyErrorMessage, + err.Error())) + + return apiEndpoints, ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.CreateServiceReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.CreateServiceReadyRunningMessage)) + return apiEndpoints, ctrlResult, nil + } + // create service - end + + // if TLS is enabled + if instance.Spec.TLS.API.Enabled(endpointType) { + // set endpoint protocol to https + data.Protocol = ptr.To(service.ProtocolHTTPS) + } + + apiEndpoints[string(endpointType)], err = svc.GetAPIEndpoint( + svcOverride.EndpointURL, data.Protocol, data.Path) + if err != nil { + return apiEndpoints, ctrl.Result{}, err + } + } + + instance.Status.Conditions.MarkTrue(condition.CreateServiceReadyCondition, condition.CreateServiceReadyMessage) + return apiEndpoints, ctrl.Result{}, nil +} + +func (r *PlacementAPIReconciler) ensureNetworkAttachments( + ctx context.Context, + h *helper.Helper, + instance *placementv1.PlacementAPI, +) (map[string]string, ctrl.Result, error) { + var 
nadAnnotations map[string]string + var err error + + // networks to attach to + nadList := []networkv1.NetworkAttachmentDefinition{} + for _, netAtt := range instance.Spec.NetworkAttachments { + nad, err := nad.GetNADWithName(ctx, h, netAtt, instance.Namespace) + if err != nil { + if k8s_errors.IsNotFound(err) { + r.GetLogger(ctx).Info(fmt.Sprintf("network-attachment-definition %s not found", netAtt)) + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.NetworkAttachmentsReadyWaitingMessage, + netAtt)) + return nadAnnotations, ctrl.Result{RequeueAfter: time.Second * 10}, nil + } + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsReadyErrorMessage, + err.Error())) + return nadAnnotations, ctrl.Result{}, err + } + + if nad != nil { + nadList = append(nadList, *nad) + } + } + + nadAnnotations, err = nad.EnsureNetworksAnnotation(nadList) + if err != nil { + return nadAnnotations, ctrl.Result{}, fmt.Errorf("failed create network annotation from %s: %w", + instance.Spec.NetworkAttachments, err) + } + return nadAnnotations, ctrl.Result{}, nil +} + +func (r *PlacementAPIReconciler) ensureKeystoneServiceUser( + ctx context.Context, + h *helper.Helper, + instance *placementv1.PlacementAPI, +) error { + // + // create service and user in keystone - https://docs.openstack.org/placement/latest/install/install-rdo.html#configure-user-and-endpoints + // + ksSvcSpec := keystonev1.KeystoneServiceSpec{ + ServiceType: placement.ServiceName, + ServiceName: placement.ServiceName, + ServiceDescription: "Placement Service", + Enabled: true, + ServiceUser: instance.Spec.ServiceUser, + Secret: instance.Spec.Secret, + PasswordSelector: instance.Spec.PasswordSelectors.Service, + } + serviceLabels := getServiceLabels(instance) + ksSvc := 
keystonev1.NewKeystoneService(ksSvcSpec, instance.Namespace, serviceLabels, time.Duration(10)*time.Second) + _, err := ksSvc.CreateOrPatch(ctx, h) + if err != nil { + return err + } + // mirror the Status, Reason, Severity and Message of the latest keystoneservice condition + // into a local condition with the type condition.KeystoneServiceReadyCondition + c := ksSvc.GetConditions().Mirror(condition.KeystoneServiceReadyCondition) + if c != nil { + instance.Status.Conditions.Set(c) + } + + return nil +} + +func (r *PlacementAPIReconciler) ensureKeystoneEndpoint( + ctx context.Context, + h *helper.Helper, + instance *placementv1.PlacementAPI, + apiEndpoints map[string]string, +) (ctrl.Result, error) { + ksEndptSpec := keystonev1.KeystoneEndpointSpec{ + ServiceName: placement.ServiceName, + Endpoints: apiEndpoints, + } + ksEndpt := keystonev1.NewKeystoneEndpoint( + placement.ServiceName, + instance.Namespace, + ksEndptSpec, + getServiceLabels(instance), + time.Duration(10)*time.Second, + ) + ctrlResult, err := ksEndpt.CreateOrPatch(ctx, h) + if err != nil { + return ctrlResult, err + } + // mirror the Status, Reason, Severity and Message of the latest keystoneendpoint condition + // into a local condition with the type condition.KeystoneEndpointReadyCondition + c := ksEndpt.GetConditions().Mirror(condition.KeystoneEndpointReadyCondition) + if c != nil { + instance.Status.Conditions.Set(c) + } + + if (ctrlResult != ctrl.Result{}) { + return ctrlResult, nil + } + + return ctrlResult, nil +} + +func (r *PlacementAPIReconciler) initStatus( + instance *placementv1.PlacementAPI, +) error { + if err := r.initConditions(instance); err != nil { + return err + } + // NOTE(gibi): initialize the rest of the status fields here + // so that the reconcile loop later can assume they are not nil. 
+ if instance.Status.Hash == nil { + instance.Status.Hash = map[string]string{} + } + if instance.Status.NetworkAttachments == nil { + instance.Status.NetworkAttachments = map[string][]string{} + } + + return nil +} + +func (r *PlacementAPIReconciler) initConditions( + instance *placementv1.PlacementAPI, +) error { + if instance.Status.Conditions == nil { + instance.Status.Conditions = condition.Conditions{} + } + // initialize conditions used later as Status=Unknown + cl := condition.CreateList( + condition.UnknownCondition( + condition.DBReadyCondition, + condition.InitReason, + condition.DBReadyInitMessage, + ), + condition.UnknownCondition( + condition.DBSyncReadyCondition, + condition.InitReason, + condition.DBSyncReadyInitMessage, + ), + condition.UnknownCondition( + condition.CreateServiceReadyCondition, + condition.InitReason, + condition.CreateServiceReadyInitMessage, + ), + condition.UnknownCondition( + condition.InputReadyCondition, + condition.InitReason, + condition.InputReadyInitMessage, + ), + condition.UnknownCondition( + condition.ServiceConfigReadyCondition, + condition.InitReason, + condition.ServiceConfigReadyInitMessage, + ), + condition.UnknownCondition( + condition.DeploymentReadyCondition, + condition.InitReason, + condition.DeploymentReadyInitMessage, + ), + // right now we have no dedicated KeystoneServiceReadyInitMessage and KeystoneEndpointReadyInitMessage + condition.UnknownCondition( + condition.KeystoneServiceReadyCondition, + condition.InitReason, + "Service registration not started", + ), + condition.UnknownCondition( + condition.KeystoneEndpointReadyCondition, + condition.InitReason, + "KeystoneEndpoint not created", + ), + condition.UnknownCondition( + condition.NetworkAttachmentsReadyCondition, + condition.InitReason, + condition.NetworkAttachmentsReadyInitMessage, + ), + // service account, role, rolebinding conditions + condition.UnknownCondition( + condition.ServiceAccountReadyCondition, + condition.InitReason, + 
condition.ServiceAccountReadyInitMessage, + ), + condition.UnknownCondition( + condition.RoleReadyCondition, + condition.InitReason, + condition.RoleReadyInitMessage, + ), + condition.UnknownCondition( + condition.RoleBindingReadyCondition, + condition.InitReason, + condition.RoleBindingReadyInitMessage), + condition.UnknownCondition( + condition.TLSInputReadyCondition, + condition.InitReason, + condition.InputReadyInitMessage), + ) + + // Init Topology condition if there's a reference + if instance.Spec.TopologyRef != nil { + c := condition.UnknownCondition( + condition.TopologyReadyCondition, + condition.InitReason, + condition.TopologyReadyInitMessage, + ) + cl.Set(c) + } + + instance.Status.Conditions.Init(&cl) + return nil +} + +// fields to index to reconcile when change +const ( + passwordSecretField = ".spec.secret" + caBundleSecretNameField = ".spec.tls.caBundleSecretName" // #nosec G101 + tlsAPIInternalField = ".spec.tls.api.internal.secretName" + tlsAPIPublicField = ".spec.tls.api.public.secretName" + topologyField = ".spec.topologyRef.Name" +) + +var allWatchFields = []string{ + passwordSecretField, + caBundleSecretNameField, + tlsAPIInternalField, + tlsAPIPublicField, + topologyField, +} + +// SetupWithManager sets up the controller with the Manager. 
+func (r *PlacementAPIReconciler) SetupWithManager(mgr ctrl.Manager) error { + // index passwordSecretField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &placementv1.PlacementAPI{}, passwordSecretField, func(rawObj client.Object) []string { + // Extract the secret name from the spec, if one is provided + cr := rawObj.(*placementv1.PlacementAPI) + if cr.Spec.Secret == "" { + return nil + } + return []string{cr.Spec.Secret} + }); err != nil { + return err + } + + // index caBundleSecretNameField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &placementv1.PlacementAPI{}, caBundleSecretNameField, func(rawObj client.Object) []string { + // Extract the secret name from the spec, if one is provided + cr := rawObj.(*placementv1.PlacementAPI) + if cr.Spec.TLS.CaBundleSecretName == "" { + return nil + } + return []string{cr.Spec.TLS.CaBundleSecretName} + }); err != nil { + return err + } + + // index tlsAPIInternalField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &placementv1.PlacementAPI{}, tlsAPIInternalField, func(rawObj client.Object) []string { + // Extract the secret name from the spec, if one is provided + cr := rawObj.(*placementv1.PlacementAPI) + if cr.Spec.TLS.API.Internal.SecretName == nil { + return nil + } + return []string{*cr.Spec.TLS.API.Internal.SecretName} + }); err != nil { + return err + } + // index tlsAPIPublicField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &placementv1.PlacementAPI{}, tlsAPIPublicField, func(rawObj client.Object) []string { + // Extract the secret name from the spec, if one is provided + cr := rawObj.(*placementv1.PlacementAPI) + if cr.Spec.TLS.API.Public.SecretName == nil { + return nil + } + return []string{*cr.Spec.TLS.API.Public.SecretName} + }); err != nil { + return err + } + + // index topologyField + if err := mgr.GetFieldIndexer().IndexField(context.Background(), &placementv1.PlacementAPI{}, topologyField, func(rawObj client.Object) 
[]string { + // Extract the topology name from the spec, if one is provided + cr := rawObj.(*placementv1.PlacementAPI) + if cr.Spec.TopologyRef == nil { + return nil + } + return []string{cr.Spec.TopologyRef.Name} + }); err != nil { + return err + } + + return ctrl.NewControllerManagedBy(mgr). + For(&placementv1.PlacementAPI{}). + Owns(&mariadbv1.MariaDBDatabase{}). + Owns(&mariadbv1.MariaDBAccount{}). + Owns(&keystonev1.KeystoneService{}). + Owns(&keystonev1.KeystoneEndpoint{}). + Owns(&batchv1.Job{}). + Owns(&corev1.Service{}). + Owns(&corev1.Secret{}). + Owns(&corev1.ConfigMap{}). + Owns(&appsv1.Deployment{}). + Owns(&corev1.ServiceAccount{}). + Owns(&rbacv1.Role{}). + Owns(&rbacv1.RoleBinding{}). + Watches( + &corev1.Secret{}, + handler.EnqueueRequestsFromMapFunc(r.findObjectsForSrc), + builder.WithPredicates(predicate.ResourceVersionChangedPredicate{}), + ). + Watches(&topologyv1.Topology{}, + handler.EnqueueRequestsFromMapFunc(r.findObjectsForSrc), + builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Watches(&keystonev1.KeystoneAPI{}, + handler.EnqueueRequestsFromMapFunc(r.findObjectForSrc), + builder.WithPredicates(keystonev1.KeystoneAPIStatusChangedPredicate)). 
+ Complete(r) +} + +func (r *PlacementAPIReconciler) findObjectsForSrc(ctx context.Context, src client.Object) []reconcile.Request { + requests := []reconcile.Request{} + + l := log.FromContext(context.Background()).WithName("Controllers").WithName("PlacementAPI") + + for _, field := range allWatchFields { + crList := &placementv1.PlacementAPIList{} + listOps := &client.ListOptions{ + FieldSelector: fields.OneTermEqualSelector(field, src.GetName()), + Namespace: src.GetNamespace(), + } + err := r.List(ctx, crList, listOps) + if err != nil { + l.Error(err, fmt.Sprintf("listing %s for field: %s - %s", crList.GroupVersionKind().Kind, field, src.GetNamespace())) + return requests + } + for _, item := range crList.Items { + l.Info(fmt.Sprintf("input source %s changed, reconcile: %s - %s", src.GetName(), item.GetName(), item.GetNamespace())) + + requests = append(requests, + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: item.GetName(), + Namespace: item.GetNamespace(), + }, + }, + ) + } + } + + return requests +} + +func (r *PlacementAPIReconciler) findObjectForSrc(ctx context.Context, src client.Object) []reconcile.Request { + requests := []reconcile.Request{} + + l := log.FromContext(ctx).WithName("Controllers").WithName("PlacementAPI") + + crList := &placementv1.PlacementAPIList{} + listOps := &client.ListOptions{ + Namespace: src.GetNamespace(), + } + err := r.Client.List(ctx, crList, listOps) + if err != nil { + l.Error(err, fmt.Sprintf("listing %s for namespace: %s", crList.GroupVersionKind().Kind, src.GetNamespace())) + return requests + } + + for _, item := range crList.Items { + l.Info(fmt.Sprintf("input source %s changed, reconcile: %s - %s", src.GetName(), item.GetName(), item.GetNamespace())) + + requests = append(requests, + reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: item.GetName(), + Namespace: item.GetNamespace(), + }, + }, + ) + } + + return requests +} + +func (r *PlacementAPIReconciler) reconcileDelete(ctx 
context.Context, instance *placementv1.PlacementAPI, helper *helper.Helper) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + Log.Info("Reconciling Service delete") + + // Remove finalizer from the referenced Topology CR + if ctrlResult, err := topologyv1.EnsureDeletedTopologyRef( + ctx, + helper, + instance.Status.LastAppliedTopology, + instance.Name, + ); err != nil { + return ctrlResult, err + } + + // remove db finalizer before the placement one + db, err := mariadbv1.GetDatabaseByNameAndAccount(ctx, helper, placement.DatabaseName, instance.Spec.DatabaseAccount, instance.Namespace) + if err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + + if !k8s_errors.IsNotFound(err) { + if err := db.DeleteFinalizer(ctx, helper); err != nil { + return ctrl.Result{}, err + } + } + + // Remove the finalizer from our KeystoneEndpoint CR + keystoneEndpoint, err := keystonev1.GetKeystoneEndpointWithName(ctx, helper, placement.ServiceName, instance.Namespace) + if err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + + if err == nil { + if controllerutil.RemoveFinalizer(keystoneEndpoint, helper.GetFinalizer()) { + err = r.Update(ctx, keystoneEndpoint) + if err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + Log.Info("Removed finalizer from our KeystoneEndpoint") + } + } + + // Remove the finalizer from our KeystoneService CR + keystoneService, err := keystonev1.GetKeystoneServiceWithName(ctx, helper, placement.ServiceName, instance.Namespace) + if err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + + if err == nil { + if controllerutil.RemoveFinalizer(keystoneService, helper.GetFinalizer()) { + err = r.Update(ctx, keystoneService) + if err != nil && !k8s_errors.IsNotFound(err) { + return ctrl.Result{}, err + } + Log.Info("Removed finalizer from our KeystoneService") + } + } + + // We did all the cleanup on the objects we created so we can remove the + // finalizer from ourselves 
to allow the deletion + controllerutil.RemoveFinalizer(instance, helper.GetFinalizer()) + Log.Info("Reconciled Service delete successfully") + return ctrl.Result{}, nil +} + +func (r *PlacementAPIReconciler) ensureDB( + ctx context.Context, + h *helper.Helper, + instance *placementv1.PlacementAPI, +) (*mariadbv1.Database, ctrl.Result, error) { + db := mariadbv1.NewDatabaseForAccount( + instance.Spec.DatabaseInstance, // mariadb/galera service to target + placement.DatabaseName, // name used in CREATE DATABASE in mariadb + placement.DatabaseName, // CR name for MariaDBDatabase + instance.Spec.DatabaseAccount, // CR name for MariaDBAccount + instance.Namespace, // namespace + ) + + // create or patch the DB + ctrlResult, err := db.CreateOrPatchAll(ctx, h) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DBReadyErrorMessage, + err.Error())) + return db, ctrl.Result{}, err + } + if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DBReadyRunningMessage)) + return db, ctrlResult, nil + } + // wait for the DB to be setup + // (ksambor) should we use WaitForDBCreatedWithTimeout instead? 
+ ctrlResult, err = db.WaitForDBCreated(ctx, h) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DBReadyErrorMessage, + err.Error())) + return db, ctrlResult, err + } + if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DBReadyRunningMessage)) + return db, ctrlResult, nil + } + + // update Status.DatabaseHostname, used to config the service + instance.Status.DatabaseHostname = db.GetDatabaseHostname() + instance.Status.Conditions.MarkTrue(condition.DBReadyCondition, condition.DBReadyMessage) + return db, ctrlResult, nil +} + +func (r *PlacementAPIReconciler) ensureDbSync( + ctx context.Context, + instance *placementv1.PlacementAPI, + helper *helper.Helper, + serviceAnnotations map[string]string, +) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + serviceLabels := getServiceLabels(instance) + dbSyncHash := instance.Status.Hash[placementv1.DbSyncHash] + jobDef := placement.DbSyncJob(instance, serviceLabels, serviceAnnotations) + dbSyncjob := job.NewJob( + jobDef, + placementv1.DbSyncHash, + instance.Spec.PreserveJobs, + time.Duration(5)*time.Second, + dbSyncHash, + ) + ctrlResult, err := dbSyncjob.DoJob( + ctx, + helper, + ) + if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBSyncReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DBSyncReadyRunningMessage)) + return ctrlResult, nil + } + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DBSyncReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DBSyncReadyErrorMessage, + err.Error())) + return ctrl.Result{}, err + } + if dbSyncjob.HasChanged() { + instance.Status.Hash[placementv1.DbSyncHash] = 
dbSyncjob.GetHash() + Log.Info(fmt.Sprintf("Job %s hash added - %s", jobDef.Name, instance.Status.Hash[placementv1.DbSyncHash])) + } + instance.Status.Conditions.MarkTrue(condition.DBSyncReadyCondition, condition.DBSyncReadyMessage) + + return ctrl.Result{}, nil +} + +func (r *PlacementAPIReconciler) ensureDeployment( + ctx context.Context, + h *helper.Helper, + instance *placementv1.PlacementAPI, + inputHash string, + serviceAnnotations map[string]string, +) (ctrl.Result, error) { + Log := r.GetLogger(ctx) + Log.Info("Reconciling Service") + + serviceLabels := getServiceLabels(instance) + + // + // Handle Topology + // + topology, err := topologyv1.EnsureServiceTopology( + ctx, + h, + instance.Spec.TopologyRef, + instance.Status.LastAppliedTopology, + instance.Name, + labels.GetLabelSelector(serviceLabels), + ) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.TopologyReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.TopologyReadyErrorMessage, + err.Error())) + return ctrl.Result{}, fmt.Errorf("waiting for Topology requirements: %w", err) + } + + // If TopologyRef is present and ensureServiceTopology returned a valid + // topology object, set .Status.LastAppliedTopology to the referenced one + // and mark the condition as true + if instance.Spec.TopologyRef != nil { + // update the Status with the last retrieved Topology name + instance.Status.LastAppliedTopology = instance.Spec.TopologyRef + // update the TopologyRef associated condition + instance.Status.Conditions.MarkTrue(condition.TopologyReadyCondition, condition.TopologyReadyMessage) + } else { + // remove LastAppliedTopology from the .Status + instance.Status.LastAppliedTopology = nil + } + + // Define a new Deployment object + deplDef, err := placement.Deployment(instance, inputHash, serviceLabels, serviceAnnotations, topology) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + 
condition.DeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DeploymentReadyErrorMessage, + err.Error())) + } + + depl := deployment.NewDeployment( + deplDef, + time.Duration(5)*time.Second, + ) + + ctrlResult, err := depl.CreateOrPatch(ctx, h) + if err != nil { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.DeploymentReadyErrorMessage, + err.Error())) + return ctrlResult, err + } else if (ctrlResult != ctrl.Result{}) { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DeploymentReadyRunningMessage)) + return ctrl.Result{}, nil + } + + deploy := depl.GetDeployment() + if deploy.Generation == deploy.Status.ObservedGeneration { + instance.Status.ReadyCount = deploy.Status.ReadyReplicas + } + + // verify if network attachment matches expectations + networkReady, networkAttachmentStatus, err := nad.VerifyNetworkStatusFromAnnotation(ctx, h, instance.Spec.NetworkAttachments, serviceLabels, instance.Status.ReadyCount) + if err != nil { + return ctrl.Result{}, err + } + + instance.Status.NetworkAttachments = networkAttachmentStatus + if networkReady { + instance.Status.Conditions.MarkTrue(condition.NetworkAttachmentsReadyCondition, condition.NetworkAttachmentsReadyMessage) + } else { + instance.Status.Conditions.Set(condition.FalseCondition( + condition.NetworkAttachmentsReadyCondition, + condition.ErrorReason, + condition.SeverityWarning, + condition.NetworkAttachmentsErrorMessage, + instance.Spec.NetworkAttachments)) + + return ctrl.Result{}, err + } + + // Mark the Deployment as Ready only if the number of Replicas is equal + to the Deployed instances (ReadyCount), and the Status.Replicas + match Status.ReadyReplicas. If a deployment update is in progress, + Replicas > ReadyReplicas. 
+ // In addition, make sure the controller sees the last Generation + by comparing it with the ObservedGeneration. + if deployment.IsReady(deploy) { + instance.Status.Conditions.MarkTrue(condition.DeploymentReadyCondition, condition.DeploymentReadyMessage) + } else { + Log.Info("Deployment is not ready") + instance.Status.Conditions.Set(condition.FalseCondition( + condition.DeploymentReadyCondition, + condition.RequestedReason, + condition.SeverityInfo, + condition.DeploymentReadyRunningMessage)) + // It is OK to return success as we are watching for Deployment changes + return ctrl.Result{}, nil + } + // create Deployment - end + + Log.Info("Reconciled Service successfully") + return ctrl.Result{}, nil + +} + +// generateServiceConfigMaps - create configmaps which hold scripts and service configuration +func (r *PlacementAPIReconciler) generateServiceConfigMaps( + ctx context.Context, + h *helper.Helper, + instance *placementv1.PlacementAPI, + ospSecret corev1.Secret, + envVars *map[string]env.Setter, + db *mariadbv1.Database, +) error { + // + // create Secret required for placement input + // - %-scripts secret holding scripts to e.g. bootstrap the service + // - %-config secret holding minimal placement config required to get the service up, user can add additional files to be added to the service + // - parameters which have passwords get added from the ospSecret via the init container + // + + cmLabels := labels.GetLabels(instance, labels.GetGroupLabel(placement.ServiceName), map[string]string{}) + + var tlsCfg *tls.Service + if instance.Spec.TLS.Ca.CaBundleSecretName != "" { + tlsCfg = &tls.Service{} + } + + // customData holds any customization for the service. + // custom.conf is going to /etc//.conf.d + // my.cnf is going to /etc/my.cnf + // all other files get placed into /etc/ to allow overwrite of e.g. 
policy.json + customData := map[string]string{ + common.CustomServiceConfigFileName: instance.Spec.CustomServiceConfig, + "my.cnf": db.GetDatabaseClientConfig(tlsCfg), //(mschuppert) for now just get the default my.cnf + } + for key, data := range instance.Spec.DefaultConfigOverwrite { + customData[key] = data + } + + keystoneAPI, err := keystonev1.GetKeystoneAPI(ctx, h, instance.Namespace, map[string]string{}) + if err != nil { + return err + } + keystoneInternalURL, err := keystoneAPI.GetEndpoint(endpoint.EndpointInternal) + if err != nil { + return err + } + keystonePublicURL, err := keystoneAPI.GetEndpoint(endpoint.EndpointPublic) + if err != nil { + return err + } + + databaseAccount := db.GetAccount() + dbSecret := db.GetSecret() + + templateParameters := map[string]interface{}{ + "ServiceUser": instance.Spec.ServiceUser, + "KeystoneInternalURL": keystoneInternalURL, + "KeystonePublicURL": keystonePublicURL, + "PlacementPassword": string(ospSecret.Data[instance.Spec.PasswordSelectors.Service]), + "log_file": "/var/log/placement/placement-api.log", + "DatabaseConnection": fmt.Sprintf("mysql+pymysql://%s:%s@%s/%s?read_default_file=/etc/my.cnf", + databaseAccount.Spec.UserName, + string(dbSecret.Data[mariadbv1.DatabasePasswordSelector]), + instance.Status.DatabaseHostname, + placement.DatabaseName, + ), + } + + // create httpd vhost template parameters + httpdVhostConfig := map[string]interface{}{} + for _, endpt := range []service.Endpoint{service.EndpointInternal, service.EndpointPublic} { + endptConfig := map[string]interface{}{} + endptConfig["ServerName"] = fmt.Sprintf("placement-%s.%s.svc", endpt.String(), instance.Namespace) + endptConfig["TLS"] = false // default TLS to false, and set it below to true if enabled + if instance.Spec.TLS.API.Enabled(endpt) { + endptConfig["TLS"] = true + endptConfig["SSLCertificateFile"] = fmt.Sprintf("/etc/pki/tls/certs/%s.crt", endpt.String()) + endptConfig["SSLCertificateKeyFile"] = 
fmt.Sprintf("/etc/pki/tls/private/%s.key", endpt.String()) + } + httpdVhostConfig[endpt.String()] = endptConfig + } + templateParameters["VHosts"] = httpdVhostConfig + templateParameters["TimeOut"] = instance.Spec.APITimeout + + extraTemplates := map[string]string{ + "placement.conf": "placementapi/config/placement.conf", + } + + cms := []util.Template{ + // ScriptsConfigMap + { + Name: fmt.Sprintf("%s-scripts", instance.Name), + Namespace: instance.Namespace, + Type: util.TemplateTypeScripts, + InstanceType: instance.Kind, + Labels: cmLabels, + }, + // ConfigMap + { + Name: fmt.Sprintf("%s-config-data", instance.Name), + Namespace: instance.Namespace, + Type: util.TemplateTypeConfig, + InstanceType: instance.Kind, + CustomData: customData, + ConfigOptions: templateParameters, + Labels: cmLabels, + AdditionalTemplate: extraTemplates, + }, + } + return secret.EnsureSecrets(ctx, h, instance, cms, envVars) +} + +// createHashOfInputHashes - creates a hash of hashes which gets added to the resources which requires a restart +// if any of the input resources change, like configs, passwords, ... 
+// +// returns the hash, whether the hash changed (as a bool) and any error +func (r *PlacementAPIReconciler) createHashOfInputHashes( + ctx context.Context, + instance *placementv1.PlacementAPI, + envVars map[string]env.Setter, +) (string, bool, error) { + Log := r.GetLogger(ctx) + var hashMap map[string]string + changed := false + mergedMapVars := env.MergeEnvs([]corev1.EnvVar{}, envVars) + hash, err := util.ObjectHash(mergedMapVars) + if err != nil { + return hash, changed, err + } + if hashMap, changed = util.SetHash(instance.Status.Hash, common.InputHashName, hash); changed { + instance.Status.Hash = hashMap + Log.Info(fmt.Sprintf("Input maps hash %s - %s", common.InputHashName, hash)) + } + return hash, changed, nil +} diff --git a/controllers/placement/suite_test.go b/controllers/placement/suite_test.go new file mode 100644 index 000000000..e6ab5fb7b --- /dev/null +++ b/controllers/placement/suite_test.go @@ -0,0 +1,80 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package controllers + +import ( + "path/filepath" + "testing" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + placementv1 "github.com/openstack-k8s-operators/nova-operator/apis/placement/v1beta1" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var cfg *rest.Config +var k8sClient client.Client +var testEnv *envtest.Environment + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. 
+ cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = placementv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/hack/clean_local_webhook.sh b/hack/clean_local_webhook.sh index db457995a..aba6e7b1a 100755 --- a/hack/clean_local_webhook.sh +++ b/hack/clean_local_webhook.sh @@ -17,3 +17,5 @@ oc delete validatingwebhookconfiguration/vnovascheduler.kb.io --ignore-not-found oc delete mutatingwebhookconfiguration/mnovascheduler.kb.io --ignore-not-found oc delete validatingwebhookconfiguration/vnovacompute.kb.io --ignore-not-found oc delete mutatingwebhookconfiguration/mnovacompute.kb.io --ignore-not-found +oc delete validatingwebhookconfiguration/vplacementapi.kb.io --ignore-not-found +oc delete mutatingwebhookconfiguration/mplacementapi.kb.io --ignore-not-found diff --git a/hack/run_with_local_webhook.sh b/hack/run_with_local_webhook.sh index 00f457050..a2de5588d 100755 --- a/hack/run_with_local_webhook.sh +++ b/hack/run_with_local_webhook.sh @@ -490,6 +490,62 @@ webhooks: scope: '*' sideEffects: None timeoutSeconds: 10 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: ValidatingWebhookConfiguration +metadata: + name: vplacementapi.kb.io +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: ${CA_BUNDLE} + url: https://${CRC_IP}:${WEBHOOK_PORT}/validate-placement-openstack-org-v1beta1-placementapi + failurePolicy: Fail + matchPolicy: Equivalent + name: vplacementapi.kb.io + objectSelector: {} + rules: + - apiGroups: + - placement.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - placementapis + scope: '*' 
+ sideEffects: None + timeoutSeconds: 10 +--- +apiVersion: admissionregistration.k8s.io/v1 +kind: MutatingWebhookConfiguration +metadata: + name: mplacementapi.kb.io +webhooks: +- admissionReviewVersions: + - v1 + clientConfig: + caBundle: ${CA_BUNDLE} + url: https://${CRC_IP}:${WEBHOOK_PORT}/mutate-placement-openstack-org-v1beta1-placementapi + failurePolicy: Fail + matchPolicy: Equivalent + name: mplacementapi.kb.io + objectSelector: {} + rules: + - apiGroups: + - placement.openstack.org + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - placementapis + scope: '*' + sideEffects: None + timeoutSeconds: 10 EOF_CAT oc apply -n openstack -f ${TMPDIR}/patch_webhook_configurations.yaml diff --git a/main.go b/main.go index dc6b8fc8c..a58617aa0 100644 --- a/main.go +++ b/main.go @@ -51,9 +51,12 @@ import ( mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + novav1 "github.com/openstack-k8s-operators/nova-operator/apis/nova/v1beta1" + placementv1 "github.com/openstack-k8s-operators/nova-operator/apis/placement/v1beta1" nova_ctrl "github.com/openstack-k8s-operators/nova-operator/controllers/nova" + placement_ctrl "github.com/openstack-k8s-operators/nova-operator/controllers/placement" //+kubebuilder:scaffold:imports ) @@ -73,6 +76,7 @@ func init() { utilruntime.Must(networkv1.AddToScheme(scheme)) utilruntime.Must(memcachedv1.AddToScheme(scheme)) utilruntime.Must(topologyv1.AddToScheme(scheme)) + utilruntime.Must(placementv1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } @@ -195,6 +199,17 @@ func main() { } } + if err = (&placement_ctrl.PlacementAPIReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "PlacementAPI") + os.Exit(1) + } + if err = (&placementv1.PlacementAPI{}).SetupWebhookWithManager(mgr); err != 
nil { + setupLog.Error(err, "unable to create webhook", "webhook", "PlacementAPI") + os.Exit(1) + } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", checker); err != nil { diff --git a/pkg/placement/const.go b/pkg/placement/const.go new file mode 100644 index 000000000..9cbd8c953 --- /dev/null +++ b/pkg/placement/const.go @@ -0,0 +1,37 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package placement + +const ( + // ServiceName - + ServiceName = "placement" + // DatabaseName - + DatabaseName = "placement" + + //config secret name + ConfigSecretName = "placement-config-data" + + // PlacementPublicPort - + PlacementPublicPort int32 = 8778 + // PlacementInternalPort - + PlacementInternalPort int32 = 8778 + + KollaServiceCommand = "/usr/local/bin/kolla_start" + + // PlacementUserID is the linux user ID used by Kolla for the placement + // user in the service containers + PlacementUserID int64 = 42482 +) diff --git a/pkg/placement/dbsync.go b/pkg/placement/dbsync.go new file mode 100644 index 000000000..289c6cfea --- /dev/null +++ b/pkg/placement/dbsync.go @@ -0,0 +1,91 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package placement + +import ( + placementv1 "github.com/openstack-k8s-operators/nova-operator/apis/placement/v1beta1" + + env "github.com/openstack-k8s-operators/lib-common/modules/common/env" + + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" +) + +// DbSyncJob func +func DbSyncJob( + instance *placementv1.PlacementAPI, + labels map[string]string, + annotations map[string]string, +) *batchv1.Job { + args := []string{"-c", KollaServiceCommand} + + envVars := map[string]env.Setter{} + envVars["KOLLA_CONFIG_STRATEGY"] = env.SetValue("COPY_ALWAYS") + envVars["KOLLA_BOOTSTRAP"] = env.SetValue("true") + + // create Volume and VolumeMounts + volumes := getVolumes(instance.Name) + volumeMounts := getVolumeMounts("dbsync") + + // add CA cert if defined + if instance.Spec.TLS.CaBundleSecretName != "" { + volumes = append(volumes, instance.Spec.TLS.CreateVolume()) + volumeMounts = append(volumeMounts, instance.Spec.TLS.CreateVolumeMounts(nil)...) 
+ } + + job := &batchv1.Job{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name + "-db-sync", + Namespace: instance.Namespace, + Labels: labels, + }, + Spec: batchv1.JobSpec{ + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + }, + Spec: corev1.PodSpec{ + RestartPolicy: corev1.RestartPolicyOnFailure, + ServiceAccountName: instance.RbacResourceName(), + Containers: []corev1.Container{ + { + Name: instance.Name + "-db-sync", + Command: []string{ + "/bin/bash", + }, + Args: args, + Image: instance.Spec.ContainerImage, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: ptr.To(PlacementUserID), + }, + Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), + VolumeMounts: volumeMounts, + }, + }, + Volumes: volumes, + }, + }, + }, + } + + if instance.Spec.NodeSelector != nil { + job.Spec.Template.Spec.NodeSelector = *instance.Spec.NodeSelector + } + + return job +} diff --git a/pkg/placement/deployment.go b/pkg/placement/deployment.go new file mode 100644 index 000000000..482a8a1d9 --- /dev/null +++ b/pkg/placement/deployment.go @@ -0,0 +1,186 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package placement + +import ( + common "github.com/openstack-k8s-operators/lib-common/modules/common" + affinity "github.com/openstack-k8s-operators/lib-common/modules/common/affinity" + env "github.com/openstack-k8s-operators/lib-common/modules/common/env" + "github.com/openstack-k8s-operators/lib-common/modules/common/service" + "github.com/openstack-k8s-operators/lib-common/modules/common/tls" + + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + placementv1 "github.com/openstack-k8s-operators/nova-operator/apis/placement/v1beta1" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "k8s.io/utils/ptr" +) + +// Deployment func +func Deployment( + instance *placementv1.PlacementAPI, + configHash string, + labels map[string]string, + annotations map[string]string, + topology *topologyv1.Topology, +) (*appsv1.Deployment, error) { + livenessProbe := &corev1.Probe{ + // TODO might need tuning + TimeoutSeconds: 30, + PeriodSeconds: 30, + InitialDelaySeconds: 5, + } + readinessProbe := &corev1.Probe{ + // TODO might need tuning + TimeoutSeconds: 30, + PeriodSeconds: 30, + InitialDelaySeconds: 5, + } + + args := []string{"-c", KollaServiceCommand} + // + // https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + // + livenessProbe.HTTPGet = &corev1.HTTPGetAction{ + Port: intstr.IntOrString{Type: intstr.Int, IntVal: int32(PlacementPublicPort)}, + } + readinessProbe.HTTPGet = &corev1.HTTPGetAction{ + Port: intstr.IntOrString{Type: intstr.Int, IntVal: int32(PlacementPublicPort)}, + } + + if instance.Spec.TLS.API.Enabled(service.EndpointPublic) { + livenessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS + readinessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS + } + + envVars := map[string]env.Setter{} + envVars["KOLLA_CONFIG_STRATEGY"] = env.SetValue("COPY_ALWAYS") + envVars["CONFIG_HASH"] = 
env.SetValue(configHash) + + // create Volume and VolumeMounts + volumes := getVolumes(instance.Name) + volumeMounts := getVolumeMounts("api") + + // add CA cert if defined + if instance.Spec.TLS.CaBundleSecretName != "" { + volumes = append(volumes, instance.Spec.TLS.CreateVolume()) + volumeMounts = append(volumeMounts, instance.Spec.TLS.CreateVolumeMounts(nil)...) + } + + for _, endpt := range []service.Endpoint{service.EndpointInternal, service.EndpointPublic} { + if instance.Spec.TLS.API.Enabled(endpt) { + var tlsEndptCfg tls.GenericService + switch endpt { + case service.EndpointPublic: + tlsEndptCfg = instance.Spec.TLS.API.Public + case service.EndpointInternal: + tlsEndptCfg = instance.Spec.TLS.API.Internal + } + + svc, err := tlsEndptCfg.ToService() + if err != nil { + return nil, err + } + volumes = append(volumes, svc.CreateVolume(endpt.String())) + volumeMounts = append(volumeMounts, svc.CreateVolumeMounts(endpt.String())...) + } + } + + deployment := &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Name: instance.Name, + Namespace: instance.Namespace, + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + Replicas: instance.Spec.Replicas, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: annotations, + Labels: labels, + }, + Spec: corev1.PodSpec{ + ServiceAccountName: instance.RbacResourceName(), + Volumes: volumes, + Containers: []corev1.Container{ + { + Name: instance.Name + "-log", + Command: []string{ + "/usr/bin/dumb-init", + }, + Args: []string{ + "--single-child", + "--", + "/usr/bin/tail", + "-n+1", + "-F", + "/var/log/placement/placement-api.log", + }, + Image: instance.Spec.ContainerImage, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: ptr.To(PlacementUserID), + }, + Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), + VolumeMounts: volumeMounts, + Resources: instance.Spec.Resources, + ReadinessProbe: readinessProbe, + LivenessProbe: livenessProbe, + 
}, + { + Name: instance.Name + "-api", + Command: []string{ + "/bin/bash", + }, + Args: args, + Image: instance.Spec.ContainerImage, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: ptr.To(PlacementUserID), + }, + Env: env.MergeEnvs([]corev1.EnvVar{}, envVars), + VolumeMounts: volumeMounts, + Resources: instance.Spec.Resources, + ReadinessProbe: readinessProbe, + LivenessProbe: livenessProbe, + }, + }, + }, + }, + }, + } + if instance.Spec.NodeSelector != nil { + deployment.Spec.Template.Spec.NodeSelector = *instance.Spec.NodeSelector + } + if topology != nil { + topology.ApplyTo(&deployment.Spec.Template) + } else { + // If possible two pods of the same service should not + // run on the same worker node. If this is not possible + // the get still created on the same worker node. + deployment.Spec.Template.Spec.Affinity = affinity.DistributePods( + common.AppSelector, + []string{ + ServiceName, + }, + corev1.LabelHostname, + ) + } + return deployment, nil +} diff --git a/pkg/placement/volumes.go b/pkg/placement/volumes.go new file mode 100644 index 000000000..fa60ef15e --- /dev/null +++ b/pkg/placement/volumes.go @@ -0,0 +1,81 @@ +/* + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package placement + +import ( + corev1 "k8s.io/api/core/v1" +) + +// getVolumes - service volumes +func getVolumes(name string) []corev1.Volume { + var scriptsVolumeDefaultMode int32 = 0755 + var configMode int32 = 0640 + + return []corev1.Volume{ + { + Name: "scripts", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + DefaultMode: &scriptsVolumeDefaultMode, + SecretName: name + "-scripts", + }, + }, + }, + { + Name: "config-data", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + DefaultMode: &configMode, + SecretName: name + "-config-data", + }, + }, + }, + { + Name: "logs", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{Medium: ""}, + }, + }, + } + +} + +// getVolumeMounts - general VolumeMounts +func getVolumeMounts(serviceName string) []corev1.VolumeMount { + return []corev1.VolumeMount{ + { + Name: "scripts", + MountPath: "/usr/local/bin/container-scripts", + ReadOnly: true, + }, + { + Name: "logs", + MountPath: "/var/log/placement", + ReadOnly: false, + }, + { + Name: "config-data", + MountPath: "/var/lib/openstack/config", + ReadOnly: false, + }, + { + Name: "config-data", + MountPath: "/var/lib/kolla/config_files/config.json", + SubPath: "placement-" + serviceName + "-config.json", + ReadOnly: true, + }, + } +} diff --git a/templates/placementapi/config/httpd.conf b/templates/placementapi/config/httpd.conf new file mode 100644 index 000000000..7c53ffd7c --- /dev/null +++ b/templates/placementapi/config/httpd.conf @@ -0,0 +1,81 @@ +ServerTokens Prod +ServerSignature Off +TraceEnable Off +PidFile run/httpd.pid +ServerRoot "/etc/httpd" +ServerName "localhost.localdomain" + +User apache +Group apache + +Listen 8778 + +TypesConfig /etc/mime.types + +Include conf.modules.d/*.conf +# XXX: To disable SSL +#+ exec /usr/sbin/httpd +#AH00526: Syntax error on line 85 of /etc/httpd/conf.d/ssl.conf: +#SSLCertificateFile: file '/etc/pki/tls/certs/localhost.crt' does not exist or 
is empty +#Include conf.d/*.conf + +LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined +LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" proxy + +SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded +ErrorLog /dev/stderr +TransferLog /dev/stdout +CustomLog /dev/stdout combined env=!forwarded +CustomLog /dev/stdout proxy env=forwarded + +{{ range $endpt, $vhost := .VHosts }} +# {{ $endpt }} vhost {{ $vhost.ServerName }} configuration + + = 2.4> + ErrorLogFormat "%M" + + ServerName {{ $vhost.ServerName }} + TimeOut {{ $.TimeOut }} + + ## Vhost docroot + ErrorLog /dev/stdout + SetEnvIf X-Forwarded-For "^.*\..*\..*\..*" forwarded + CustomLog /dev/stdout combined env=!forwarded + CustomLog /dev/stdout proxy env=forwarded + ServerSignature Off + CustomLog /dev/stdout combined + +{{- if $vhost.TLS }} + SetEnvIf X-Forwarded-Proto https HTTPS=1 + + ## SSL directives + SSLEngine on + SSLCertificateFile "{{ $vhost.SSLCertificateFile }}" + SSLCertificateKeyFile "{{ $vhost.SSLCertificateKeyFile }}" +{{- end }} + + ## Directories, there should at least be a declaration for /var/www/cgi-bin/placement + + Options -Indexes +FollowSymLinks +MultiViews + AllowOverride None + Require all granted + + + ## WSGI configuration + WSGIProcessGroup placement-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + WSGIDaemonProcess {{ $endpt }} display-name={{ $endpt }} group=placement processes=3 threads=1 user=placement + WSGIProcessGroup {{ $endpt }} + WSGIScriptAlias / /usr/bin/placement-api + +{{ end }} + +Alias /placement-api /usr/bin/placement-api + + SetHandler wsgi-script + Options +ExecCGI + WSGIProcessGroup placement-api + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + diff --git a/templates/placementapi/config/placement-api-config.json b/templates/placementapi/config/placement-api-config.json new file mode 100644 index 000000000..e7d222f5f --- /dev/null +++ 
b/templates/placementapi/config/placement-api-config.json @@ -0,0 +1,70 @@ +{ + "command": "/usr/sbin/httpd -DFOREGROUND", + "config_files": [ + { + "source": "/var/lib/openstack/config/placement.conf", + "dest": "/etc/placement/placement.conf", + "owner": "placement", + "perm": "0600" + }, + { + "source": "/var/lib/openstack/config/httpd.conf", + "dest": "/etc/httpd/conf/httpd.conf", + "owner": "apache", + "perm": "0644" + }, + { + "source": "/var/lib/openstack/config/custom.conf", + "dest": "/etc/placement/placement.conf.d/custom.conf", + "owner": "placement", + "perm": "0600" + }, + { + "source": "/var/lib/openstack/config/ssl.conf", + "dest": "/etc/httpd/conf.d/ssl.conf", + "owner": "apache", + "perm": "0644" + }, + { + "source": "/var/lib/config-data/tls/certs/*", + "dest": "/etc/pki/tls/certs/", + "owner": "placement", + "perm": "0440", + "optional": true, + "merge": true + }, + { + "source": "/var/lib/config-data/tls/private/*", + "dest": "/etc/pki/tls/private/", + "owner": "placement", + "perm": "0400", + "optional": true, + "merge": true + }, + { + "source": "/var/lib/openstack/config/policy.yaml", + "dest": "/etc/placement/policy.yaml", + "owner": "placement", + "perm": "0600", + "optional": true + }, + { + "source": "/var/lib/openstack/config/my.cnf", + "dest": "/etc/my.cnf", + "owner": "placement", + "perm": "0644" + } + ], + "permissions": [ + { + "path": "/var/log/placement", + "owner": "placement:apache", + "recurse": true + }, + { + "path": "/etc/httpd/run/", + "owner": "placement:apache", + "recurse": true + } + ] +} diff --git a/templates/placementapi/config/placement-dbsync-config.json b/templates/placementapi/config/placement-dbsync-config.json new file mode 100644 index 000000000..de04aa6db --- /dev/null +++ b/templates/placementapi/config/placement-dbsync-config.json @@ -0,0 +1,23 @@ +{ + "command": "placement-manage db sync", + "config_files": [ + { + "source": "/var/lib/openstack/config/placement.conf", + "dest": 
"/etc/placement/placement.conf", + "owner": "placement", + "perm": "0600" + }, + { + "source": "/var/lib/openstack/config/custom.conf", + "dest": "/etc/placement/placement.conf.d/custom.conf", + "owner": "placement", + "perm": "0600" + }, + { + "source": "/var/lib/openstack/config/my.cnf", + "dest": "/etc/my.cnf", + "owner": "placement", + "perm": "0644" + } + ] +} diff --git a/templates/placementapi/config/placement.conf b/templates/placementapi/config/placement.conf new file mode 100644 index 000000000..2cdbd2f8a --- /dev/null +++ b/templates/placementapi/config/placement.conf @@ -0,0 +1,29 @@ +[DEFAULT] +# enable log rotation in oslo config by default +max_logfile_count=5 +max_logfile_size_mb=50 +log_rotation_type=size +{{if (index . "log_file") }} +log_file = {{ .log_file }} +{{end}} +debug = true + +[placement_database] +connection = {{ .DatabaseConnection }} + +[api] +auth_strategy = keystone + +[keystone_authtoken] +project_domain_name = Default +user_domain_name = Default +project_name = service +username = {{ .ServiceUser }} +password = {{ .PlacementPassword }} +www_authenticate_uri = {{ .KeystonePublicURL }} +auth_url = {{ .KeystoneInternalURL }} +auth_type = password +interface = internal + +[oslo_policy] +policy_file=/etc/placement/policy.yaml diff --git a/templates/placementapi/config/ssl.conf b/templates/placementapi/config/ssl.conf new file mode 100644 index 000000000..e3da4ecb2 --- /dev/null +++ b/templates/placementapi/config/ssl.conf @@ -0,0 +1,21 @@ + + SSLRandomSeed startup builtin + SSLRandomSeed startup file:/dev/urandom 512 + SSLRandomSeed connect builtin + SSLRandomSeed connect file:/dev/urandom 512 + + AddType application/x-x509-ca-cert .crt + AddType application/x-pkcs7-crl .crl + + SSLPassPhraseDialog builtin + SSLSessionCache "shmcb:/var/cache/mod_ssl/scache(512000)" + SSLSessionCacheTimeout 300 + Mutex default + SSLCryptoDevice builtin + SSLHonorCipherOrder On + SSLUseStapling Off + SSLStaplingCache 
"shmcb:/run/httpd/ssl_stapling(32768)" + SSLCipherSuite HIGH:MEDIUM:!aNULL:!MD5:!RC4:!3DES + SSLProtocol all -SSLv2 -SSLv3 -TLSv1 + SSLOptions StdEnvVars + diff --git a/test/functional/placement/base_test.go b/test/functional/placement/base_test.go new file mode 100644 index 000000000..c1898e564 --- /dev/null +++ b/test/functional/placement/base_test.go @@ -0,0 +1,220 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package functional_test + +import ( + "fmt" + . "github.com/onsi/gomega" //revive:disable:dot-imports + + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + placementv1 "github.com/openstack-k8s-operators/nova-operator/apis/placement/v1beta1" + "github.com/openstack-k8s-operators/nova-operator/pkg/placement" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type Names struct { + Namespace string + PlacementAPIName types.NamespacedName + ConfigMapName types.NamespacedName + DBSyncJobName types.NamespacedName + MariaDBDatabaseName types.NamespacedName + MariaDBAccount types.NamespacedName + DeploymentName types.NamespacedName + PublicServiceName types.NamespacedName + InternalServiceName types.NamespacedName + KeystoneServiceName types.NamespacedName + KeystoneEndpointName types.NamespacedName + ServiceAccountName types.NamespacedName + RoleName types.NamespacedName + RoleBindingName types.NamespacedName 
+ CaBundleSecretName types.NamespacedName + InternalCertSecretName types.NamespacedName + PublicCertSecretName types.NamespacedName + PlacementAPITopologies []types.NamespacedName +} + +func CreateNames(placementAPIName types.NamespacedName) Names { + return Names{ + Namespace: placementAPIName.Namespace, + PlacementAPIName: placementAPIName, + ConfigMapName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: placementAPIName.Name + "-config-data"}, + DBSyncJobName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: placementAPIName.Name + "-db-sync"}, + MariaDBDatabaseName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: placement.DatabaseName}, + MariaDBAccount: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: AccountName}, + DeploymentName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: placementAPIName.Name}, + PublicServiceName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: "placement-public"}, + InternalServiceName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: "placement-internal"}, + KeystoneServiceName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: "placement"}, + KeystoneEndpointName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: "placement"}, + ServiceAccountName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: "placement-" + placementAPIName.Name}, + RoleName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: "placement-" + placementAPIName.Name + "-role"}, + RoleBindingName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: "placement-" + placementAPIName.Name + "-rolebinding"}, + CaBundleSecretName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: CABundleSecretName}, + InternalCertSecretName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + 
Name: InternalCertSecretName}, + PublicCertSecretName: types.NamespacedName{ + Namespace: placementAPIName.Namespace, + Name: PublicCertSecretName}, + PlacementAPITopologies: []types.NamespacedName{ + { + Namespace: namespace, + Name: fmt.Sprintf("%s-topology", placementAPIName.Name), + }, + { + Namespace: namespace, + Name: fmt.Sprintf("%s-topology-alt", placementAPIName.Name), + }, + }, + } +} + +func GetDefaultPlacementAPISpec() map[string]interface{} { + return map[string]interface{}{ + "databaseInstance": "openstack", + "secret": SecretName, + "databaseAccount": AccountName, + } +} + +func GetTLSPlacementAPISpec(names Names) map[string]interface{} { + return map[string]interface{}{ + "databaseInstance": "openstack", + "replicas": 1, + "secret": SecretName, + "databaseAccount": AccountName, + "tls": map[string]interface{}{ + "api": map[string]interface{}{ + "internal": map[string]interface{}{ + "secretName": names.InternalCertSecretName.Name, + }, + "public": map[string]interface{}{ + "secretName": names.PublicCertSecretName.Name, + }, + }, + "caBundleSecretName": names.CaBundleSecretName.Name, + }, + } +} + +func CreatePlacementAPI(name types.NamespacedName, spec map[string]interface{}) client.Object { + + raw := map[string]interface{}{ + "apiVersion": "placement.openstack.org/v1beta1", + "kind": "PlacementAPI", + "metadata": map[string]interface{}{ + "name": name.Name, + "namespace": name.Namespace, + }, + "spec": spec, + } + return th.CreateUnstructured(raw) +} + +func GetPlacementAPI(name types.NamespacedName) *placementv1.PlacementAPI { + instance := &placementv1.PlacementAPI{} + Eventually(func(g Gomega) { + g.Expect(k8sClient.Get(ctx, name, instance)).Should(Succeed()) + }, timeout, interval).Should(Succeed()) + return instance +} + +func CreatePlacementAPISecret(namespace string, name string) *corev1.Secret { + return th.CreateSecret( + types.NamespacedName{Namespace: namespace, Name: name}, + map[string][]byte{ + "PlacementPassword": 
[]byte("12345678"), + "PlacementDatabasePassword": []byte("12345678"), + }, + ) +} + +func PlacementConditionGetter(name types.NamespacedName) condition.Conditions { + instance := GetPlacementAPI(name) + return instance.Status.Conditions +} + +// GetSampleTopologySpec - A sample (and opinionated) Topology Spec used to +// test PlacementAPI +// Note this is just an example that should not be used in production for +// multiple reasons: +// 1. It uses ScheduleAnyway as strategy, which is something we might +// want to avoid by default +// 2. Usually a topologySpreadConstraints is used to take care about +// multi AZ, which is not applicable in this context +func GetSampleTopologySpec(label string) (map[string]interface{}, []corev1.TopologySpreadConstraint) { + // Build the topology Spec + topologySpec := map[string]interface{}{ + "topologySpreadConstraints": []map[string]interface{}{ + { + "maxSkew": 1, + "topologyKey": corev1.LabelHostname, + "whenUnsatisfiable": "ScheduleAnyway", + "labelSelector": map[string]interface{}{ + "matchLabels": map[string]interface{}{ + "service": placement.ServiceName, + "topology": label, + }, + }, + }, + }, + } + // Build the topologyObj representation + topologySpecObj := []corev1.TopologySpreadConstraint{ + { + MaxSkew: 1, + TopologyKey: corev1.LabelHostname, + WhenUnsatisfiable: corev1.ScheduleAnyway, + LabelSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "service": placement.ServiceName, + "topology": label, + }, + }, + }, + } + return topologySpec, topologySpecObj +} diff --git a/test/functional/placement/placementapi_controller_test.go b/test/functional/placement/placementapi_controller_test.go new file mode 100644 index 000000000..ac42960ce --- /dev/null +++ b/test/functional/placement/placementapi_controller_test.go @@ -0,0 +1,1441 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package functional_test + +import ( + "fmt" + "os" + + . "github.com/onsi/ginkgo/v2" //revive:disable:dot-imports + . "github.com/onsi/gomega" //revive:disable:dot-imports + + //revive:disable-next-line:dot-imports + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + keystonev1 "github.com/openstack-k8s-operators/keystone-operator/api/v1beta1" + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + . "github.com/openstack-k8s-operators/lib-common/modules/common/test/helpers" + mariadb_test "github.com/openstack-k8s-operators/mariadb-operator/api/test/helpers" + mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" + "github.com/openstack-k8s-operators/nova-operator/pkg/placement" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +var _ = Describe("PlacementAPI controller", func() { + + BeforeEach(func() { + // lib-common uses OPERATOR_TEMPLATES env var to locate the "templates" + // directory of the operator. 
We need to set them othervise lib-common + // will fail to generate the ConfigMap as it does not find common.sh + err := os.Setenv("OPERATOR_TEMPLATES", "../../../templates") + Expect(err).NotTo(HaveOccurred()) + }) + + When("A PlacementAPI instance is created", func() { + BeforeEach(func() { + DeferCleanup( + th.DeleteInstance, + CreatePlacementAPI(names.PlacementAPIName, GetDefaultPlacementAPISpec()), + ) + }) + + It("should have the Spec fields defaulted", func() { + Placement := GetPlacementAPI(names.PlacementAPIName) + Expect(Placement.Spec.DatabaseInstance).Should(Equal("openstack")) + Expect(Placement.Spec.DatabaseAccount).Should(Equal(AccountName)) + Expect(Placement.Spec.ServiceUser).Should(Equal("placement")) + Expect(*(Placement.Spec.Replicas)).Should(Equal(int32(1))) + }) + + It("should have the Status fields initialized", func() { + Placement := GetPlacementAPI(names.PlacementAPIName) + Expect(Placement.Status.Hash).To(BeEmpty()) + Expect(Placement.Status.DatabaseHostname).To(Equal("")) + Expect(Placement.Status.ReadyCount).To(Equal(int32(0))) + }) + + It("should have a finalizer", func() { + // the reconciler loop adds the finalizer so we have to wait for + // it to run + Eventually(func() []string { + return GetPlacementAPI(names.PlacementAPIName).Finalizers + }, timeout, interval).Should(ContainElement("openstack.org/placementapi")) + }) + + It("should not create a config map", func() { + th.AssertConfigMapDoesNotExist(names.ConfigMapName) + }) + + It("should have input not ready and unknown Conditions initialized", func() { + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.InputReadyCondition, + corev1.ConditionFalse, + ) + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + 
condition.RoleBindingReadyCondition, + corev1.ConditionTrue, + ) + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.RoleReadyCondition, + corev1.ConditionTrue, + ) + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ServiceAccountReadyCondition, + corev1.ConditionTrue, + ) + unknownConditions := []condition.Type{ + condition.DBReadyCondition, + condition.DBSyncReadyCondition, + condition.CreateServiceReadyCondition, + condition.ServiceConfigReadyCondition, + condition.DeploymentReadyCondition, + condition.KeystoneServiceReadyCondition, + condition.KeystoneEndpointReadyCondition, + condition.NetworkAttachmentsReadyCondition, + condition.TLSInputReadyCondition, + } + + placement := GetPlacementAPI(names.PlacementAPIName) + // +5 as InputReady, Ready, Service and Role are ready is False asserted above + Expect(placement.Status.Conditions).To(HaveLen(len(unknownConditions) + 5)) + + for _, cond := range unknownConditions { + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + cond, + corev1.ConditionUnknown, + ) + } + }) + }) + + When("starts zero replicas", func() { + BeforeEach(func() { + spec := GetDefaultPlacementAPISpec() + spec["replicas"] = 0 + DeferCleanup( + th.DeleteInstance, + CreatePlacementAPI(names.PlacementAPIName, spec), + ) + DeferCleanup( + k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + keystoneAPIName := keystone.CreateKeystoneAPI(namespace) + DeferCleanup(keystone.DeleteKeystoneAPI, keystoneAPIName) + + }) + + It("and deployment is Ready", func() { + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) 
+ th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + placement := GetPlacementAPI(names.PlacementAPIName) + Expect(*(placement.Spec.Replicas)).Should(Equal(int32(0))) + Expect(placement.Status.ReadyCount).Should(Equal(int32(0))) + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionTrue, + ) + }) + }) + + When("a secret is provided with missing fields", func() { + BeforeEach(func() { + DeferCleanup( + th.DeleteInstance, + CreatePlacementAPI(names.PlacementAPIName, GetDefaultPlacementAPISpec()), + ) + DeferCleanup( + k8sClient.Delete, ctx, + th.CreateSecret( + types.NamespacedName{Namespace: namespace, Name: SecretName}, + map[string][]byte{}), + ) + }) + It("reports that input is not ready", func() { + // FIXME(gibi): This is a bug as placement controller does not + // check the content of the Secret so eventually a dbsync job is + // created with incorrect config + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.InputReadyCondition, + corev1.ConditionFalse, + ) + }) + }) + + When("the proper secret is provided", func() { + BeforeEach(func() { + DeferCleanup( + th.DeleteInstance, + CreatePlacementAPI(names.PlacementAPIName, GetDefaultPlacementAPISpec()), + ) + DeferCleanup( + k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + db := mariadb.GetMariaDBDatabase(names.MariaDBDatabaseName) + Expect(db.Spec.Name).To(Equal(names.MariaDBDatabaseName.Name)) + + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + }) + + It("should have input ready", func() { + 
th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.InputReadyCondition, + corev1.ConditionTrue, + ) + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ServiceConfigReadyCondition, + corev1.ConditionFalse, + ) + }) + + It("should not create a config map", func() { + th.AssertConfigMapDoesNotExist(names.ConfigMapName) + }) + }) + + When("keystoneAPI instance is available", func() { + var keystoneAPI *keystonev1.KeystoneAPI + + BeforeEach(func() { + spec := GetDefaultPlacementAPISpec() + spec["customServiceConfig"] = "foo = bar" + spec["defaultConfigOverwrite"] = map[string]interface{}{ + "policy.yaml": "\"placement:resource_providers:list\": \"!\"", + } + DeferCleanup(th.DeleteInstance, CreatePlacementAPI(names.PlacementAPIName, spec)) + DeferCleanup( + k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + keystoneAPIName := keystone.CreateKeystoneAPI(namespace) + keystoneAPI = keystone.GetKeystoneAPI(keystoneAPIName) + DeferCleanup(keystone.DeleteKeystoneAPI, keystoneAPIName) + }) + + It("creates MariaDB database", func() { + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DBReadyCondition, + corev1.ConditionFalse, + ) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + db := mariadb.GetMariaDBDatabase(names.MariaDBDatabaseName) + Expect(db.Spec.Name).To(Equal(names.MariaDBDatabaseName.Name)) + + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DBReadyCondition, + corev1.ConditionTrue, + ) + }) + + It("should have config ready", func() { + 
serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + db := mariadb.GetMariaDBDatabase(names.MariaDBDatabaseName) + Expect(db.Spec.Name).To(Equal(names.MariaDBDatabaseName.Name)) + + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ServiceConfigReadyCondition, + corev1.ConditionTrue, + ) + }) + + It("should create a configuration Secret", func() { + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + cm := th.GetSecret(names.ConfigMapName) + + conf := cm.Data["placement.conf"] + Expect(conf).Should( + ContainSubstring("auth_url = %s", keystoneAPI.Status.APIEndpoints["internal"])) + Expect(conf).Should( + ContainSubstring("www_authenticate_uri = %s", keystoneAPI.Status.APIEndpoints["public"])) + Expect(conf).Should( + ContainSubstring("username = placement")) + Expect(conf).Should( + ContainSubstring("password = 12345678")) + + mariadbAccount := mariadb.GetMariaDBAccount(names.MariaDBAccount) + mariadbSecret := th.GetSecret(types.NamespacedName{Name: mariadbAccount.Spec.Secret, Namespace: names.PlacementAPIName.Namespace}) + + Expect(string(conf)).Should( + ContainSubstring(fmt.Sprintf("connection = mysql+pymysql://%s:%s@hostname-for-openstack.%s.svc/placement?read_default_file=/etc/my.cnf", + mariadbAccount.Spec.UserName, mariadbSecret.Data[mariadbv1.DatabasePasswordSelector], namespace))) + + custom := cm.Data["custom.conf"] + Expect(custom).Should(ContainSubstring("foo = 
bar")) + + policy := cm.Data["policy.yaml"] + Expect(policy).Should( + ContainSubstring("\"placement:resource_providers:list\": \"!\"")) + + myCnf := cm.Data["my.cnf"] + Expect(myCnf).To( + ContainSubstring("[client]\nssl=0")) + configData := cm.Data["httpd.conf"] + Expect(configData).Should( + ContainSubstring("TimeOut 60")) + }) + + It("creates service account, role and rolebindig", func() { + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ServiceAccountReadyCondition, + corev1.ConditionTrue, + ) + sa := th.GetServiceAccount(names.ServiceAccountName) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.RoleReadyCondition, + corev1.ConditionTrue, + ) + role := th.GetRole(names.RoleName) + Expect(role.Rules).To(HaveLen(2)) + Expect(role.Rules[0].Resources).To(Equal([]string{"securitycontextconstraints"})) + Expect(role.Rules[1].Resources).To(Equal([]string{"pods"})) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.RoleBindingReadyCondition, + corev1.ConditionTrue, + ) + binding := th.GetRoleBinding(names.RoleBindingName) + Expect(binding.RoleRef.Name).To(Equal(role.Name)) + Expect(binding.Subjects).To(HaveLen(1)) + Expect(binding.Subjects[0].Name).To(Equal(sa.Name)) + }) + + It("creates keystone service", func() { + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.KeystoneServiceReadyCondition, + corev1.ConditionUnknown, + ) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + 
DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.KeystoneServiceReadyCondition, + corev1.ConditionTrue, + ) + }) + It("creates keystone endpoint", func() { + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.KeystoneEndpointReadyCondition, + corev1.ConditionUnknown, + ) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.KeystoneEndpointReadyCondition, + corev1.ConditionTrue, + ) + }) + It("runs db sync", func() { + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.ExpectCondition( + names.PlacementAPIName, + 
ConditionGetterFunc(PlacementConditionGetter), + condition.DBSyncReadyCondition, + corev1.ConditionFalse, + ) + + job := th.GetJob(names.DBSyncJobName) + Expect(job.Spec.Template.Spec.Volumes).To(HaveLen(3)) + Expect(job.Spec.Template.Spec.Containers).To(HaveLen(1)) + + container := job.Spec.Template.Spec.Containers[0] + Expect(container.VolumeMounts).To(HaveLen(4)) + Expect(container.Image).To(Equal("quay.io/podified-antelope-centos9/openstack-placement-api:current-podified")) + + th.SimulateJobSuccess(names.DBSyncJobName) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DBSyncReadyCondition, + corev1.ConditionTrue, + ) + }) + It("creates deployment", func() { + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + th.SimulateJobSuccess(names.DBSyncJobName) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionUnknown, + ) + + deployment := th.GetDeployment(names.DeploymentName) + Expect(int(*deployment.Spec.Replicas)).To(Equal(1)) + Expect(deployment.Spec.Selector.MatchLabels).To(Equal(map[string]string{"service": "placement", "owner": names.PlacementAPIName.Name})) + Expect(deployment.Spec.Template.Spec.ServiceAccountName).To(Equal(names.ServiceAccountName.Name)) + Expect(deployment.Spec.Template.Spec.Containers).To(HaveLen(2)) + + th.SimulateDeploymentReplicaReady(names.DeploymentName) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionTrue, + ) + }) + It("exposes the service", func() { + th.ExpectCondition( + names.PlacementAPIName, 
+ ConditionGetterFunc(PlacementConditionGetter), + condition.CreateServiceReadyCondition, + corev1.ConditionUnknown, + ) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + + public := th.GetService(names.PublicServiceName) + Expect(public.Labels["service"]).To(Equal("placement")) + internal := th.GetService(names.InternalServiceName) + Expect(internal.Labels["service"]).To(Equal("placement")) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.CreateServiceReadyCondition, + corev1.ConditionTrue, + ) + }) + + It("reports ready when successfully deployed", func() { + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ReadyCondition, + corev1.ConditionTrue, + ) + }) + }) + + When("Deployment rollout is progressing", func() { + BeforeEach(func() { + spec := 
GetDefaultPlacementAPISpec() + DeferCleanup(th.DeleteInstance, CreatePlacementAPI(names.PlacementAPIName, spec)) + DeferCleanup( + k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + DeferCleanup(keystone.DeleteKeystoneAPI, keystone.CreateKeystoneAPI(namespace)) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentProgressing(names.DeploymentName) + }) + + It("shows the deployment progressing in DeploymentReadyCondition", func() { + th.ExpectConditionWithDetails( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionFalse, + condition.RequestedReason, + condition.DeploymentReadyRunningMessage, + ) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionFalse, + ) + }) + + It("still shows the deployment progressing in DeploymentReadyCondition when rollout hits ProgressDeadlineExceeded", func() { + th.SimulateDeploymentProgressDeadlineExceeded(names.DeploymentName) + th.ExpectConditionWithDetails( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionFalse, + condition.RequestedReason, + condition.DeploymentReadyRunningMessage, + ) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionFalse, + ) + }) + + It("reaches Ready when deployment rollout finished", func() { + th.ExpectConditionWithDetails( + names.PlacementAPIName, + 
ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionFalse, + condition.RequestedReason, + condition.DeploymentReadyRunningMessage, + ) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionFalse, + ) + + th.SimulateDeploymentReplicaReady(names.DeploymentName) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionTrue, + ) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.DeploymentReadyCondition, + corev1.ConditionTrue, + ) + }) + }) + + When("A PlacementAPI is created with service override", func() { + BeforeEach(func() { + DeferCleanup(k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + DeferCleanup(keystone.DeleteKeystoneAPI, keystone.CreateKeystoneAPI(namespace)) + + spec := GetDefaultPlacementAPISpec() + serviceOverride := map[string]interface{}{} + serviceOverride["internal"] = map[string]interface{}{ + "metadata": map[string]map[string]string{ + "annotations": { + "dnsmasq.network.openstack.org/hostname": "placement-internal.openstack.svc", + "metallb.universe.tf/address-pool": "osp-internalapi", + "metallb.universe.tf/allow-shared-ip": "osp-internalapi", + "metallb.universe.tf/loadBalancerIPs": "internal-lb-ip-1,internal-lb-ip-2", + }, + "labels": { + "internal": "true", + "service": "placement", + }, + }, + "spec": map[string]interface{}{ + "type": "LoadBalancer", + }, + } + + spec["override"] = map[string]interface{}{ + "service": serviceOverride, + } + + placementAPI := CreatePlacementAPI(names.PlacementAPIName, spec) + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService( + namespace, + GetPlacementAPI(names.PlacementAPIName).Spec.DatabaseInstance, + corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{Port: 
3306}}, + }, + ), + ) + + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + th.SimulateLoadBalancerServiceIP(types.NamespacedName{Namespace: namespace, Name: "placement-internal"}) + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + DeferCleanup(th.DeleteInstance, placementAPI) + }) + + It("creates KeystoneEndpoint", func() { + keystoneEndpoint := keystone.GetKeystoneEndpoint(names.KeystoneEndpointName) + endpoints := keystoneEndpoint.Spec.Endpoints + Expect(endpoints).To(HaveKeyWithValue("public", "http://placement-public."+namespace+".svc:8778")) + Expect(endpoints).To(HaveKeyWithValue("internal", "http://placement-internal."+namespace+".svc:8778")) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.KeystoneEndpointReadyCondition, + corev1.ConditionTrue, + ) + }) + + It("creates LoadBalancer service", func() { + // As the internal endpoint is configured in ExternalEndpoints it + // gets a LoadBalancer Service with MetalLB annotations + service := th.GetService(types.NamespacedName{Namespace: namespace, Name: "placement-internal"}) + Expect(service.Annotations).To( + HaveKeyWithValue("dnsmasq.network.openstack.org/hostname", "placement-internal.openstack.svc")) + Expect(service.Annotations).To( + HaveKeyWithValue("metallb.universe.tf/address-pool", "osp-internalapi")) + Expect(service.Annotations).To( + HaveKeyWithValue("metallb.universe.tf/allow-shared-ip", "osp-internalapi")) + Expect(service.Annotations).To( + HaveKeyWithValue("metallb.universe.tf/loadBalancerIPs", "internal-lb-ip-1,internal-lb-ip-2")) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ReadyCondition, + 
corev1.ConditionTrue, + ) + }) + }) + + When("A PlacementAPI is created with service override endpointURL set", func() { + BeforeEach(func() { + DeferCleanup(k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + DeferCleanup(keystone.DeleteKeystoneAPI, keystone.CreateKeystoneAPI(namespace)) + + spec := GetDefaultPlacementAPISpec() + serviceOverride := map[string]interface{}{} + serviceOverride["public"] = map[string]interface{}{ + "endpointURL": "http://placement-openstack.apps-crc.testing", + } + + spec["override"] = map[string]interface{}{ + "service": serviceOverride, + } + + placementAPI := CreatePlacementAPI(names.PlacementAPIName, spec) + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService( + namespace, + GetPlacementAPI(names.PlacementAPIName).Spec.DatabaseInstance, + corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{Port: 3306}}, + }, + ), + ) + + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + DeferCleanup(th.DeleteInstance, placementAPI) + }) + + It("creates KeystoneEndpoint", func() { + keystoneEndpoint := keystone.GetKeystoneEndpoint(names.KeystoneEndpointName) + endpoints := keystoneEndpoint.Spec.Endpoints + Expect(endpoints).To(HaveKeyWithValue("public", "http://placement-openstack.apps-crc.testing")) + Expect(endpoints).To(HaveKeyWithValue("internal", "http://placement-internal."+namespace+".svc:8778")) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.KeystoneEndpointReadyCondition, + corev1.ConditionTrue, + ) + }) + }) + + Context("PlacementAPI is fully deployed", func() { + keystoneAPIName := types.NamespacedName{} + 
BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreatePlacementAPI(names.PlacementAPIName, GetDefaultPlacementAPISpec())) + DeferCleanup( + k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + keystoneAPIName = keystone.CreateKeystoneAPI(namespace) + DeferCleanup(keystone.DeleteKeystoneAPI, keystoneAPIName) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ReadyCondition, + corev1.ConditionTrue, + ) + }) + + It("removes the finalizers when deleted", func() { + placement := GetPlacementAPI(names.PlacementAPIName) + Expect(placement.Finalizers).To(ContainElement("openstack.org/placementapi")) + keystoneService := keystone.GetKeystoneService(names.KeystoneServiceName) + Expect(keystoneService.Finalizers).To(ContainElement("openstack.org/placementapi")) + keystoneEndpoint := keystone.GetKeystoneService(names.KeystoneEndpointName) + Expect(keystoneEndpoint.Finalizers).To(ContainElement("openstack.org/placementapi")) + db := mariadb.GetMariaDBDatabase(names.MariaDBDatabaseName) + Expect(db.Finalizers).To(ContainElement("openstack.org/placementapi")) + acc := mariadb.GetMariaDBAccount(names.MariaDBAccount) + Expect(acc.Finalizers).To(ContainElement("openstack.org/placementapi")) + + th.DeleteInstance(GetPlacementAPI(names.PlacementAPIName)) + + keystoneService = keystone.GetKeystoneService(names.KeystoneServiceName) + 
Expect(keystoneService.Finalizers).NotTo(ContainElement("openstack.org/placementapi")) + keystoneEndpoint = keystone.GetKeystoneService(names.KeystoneEndpointName) + Expect(keystoneEndpoint.Finalizers).NotTo(ContainElement("openstack.org/placementapi")) + db = mariadb.GetMariaDBDatabase(names.MariaDBDatabaseName) + Expect(db.Finalizers).NotTo(ContainElement("openstack.org/placementapi")) + acc = mariadb.GetMariaDBAccount(names.MariaDBAccount) + Expect(acc.Finalizers).NotTo(ContainElement("openstack.org/placementapi")) + }) + + It("updates the deployment if configuration changes", func() { + deployment := th.GetDeployment(names.DeploymentName) + oldConfigHash := GetEnvVarValue(deployment.Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "") + Expect(oldConfigHash).NotTo(Equal("")) + cm := th.GetSecret(names.ConfigMapName) + Expect(cm.Data["custom.conf"]).ShouldNot(ContainSubstring("debug")) + + Eventually(func(g Gomega) { + placement := GetPlacementAPI(names.PlacementAPIName) + placement.Spec.CustomServiceConfig = "[DEFAULT]/ndebug = true" + + g.Expect(k8sClient.Update(ctx, placement)).Should(Succeed()) + }, timeout, interval).Should(Succeed()) + + Eventually(func(g Gomega) { + deployment := th.GetDeployment(names.DeploymentName) + newConfigHash := GetEnvVarValue(deployment.Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "") + g.Expect(newConfigHash).NotTo(Equal("")) + g.Expect(newConfigHash).NotTo(Equal(oldConfigHash)) + + cm := th.GetSecret(names.ConfigMapName) + g.Expect(cm.Data["custom.conf"]).Should(ContainSubstring("debug = true")) + }, timeout, interval).Should(Succeed()) + }) + + It("updates the deployment if password changes", func() { + deployment := th.GetDeployment(names.DeploymentName) + oldConfigHash := GetEnvVarValue(deployment.Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "") + Expect(oldConfigHash).NotTo(Equal("")) + + th.UpdateSecret( + types.NamespacedName{Namespace: namespace, Name: SecretName}, + "PlacementPassword", 
[]byte("foobar")) + + logger.Info("Reconfigured") + + Eventually(func(g Gomega) { + deployment := th.GetDeployment(names.DeploymentName) + newConfigHash := GetEnvVarValue(deployment.Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "") + g.Expect(newConfigHash).NotTo(Equal(oldConfigHash)) + // TODO(gibi): once the password is in the generated config + // assert it there + }, timeout, interval).Should(Succeed()) + }) + + It("updates the KeystoneAuthURL if keystone internal endpoint changes", func() { + deployment := th.GetDeployment(names.DeploymentName) + oldConfigHash := GetEnvVarValue(deployment.Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "") + Expect(oldConfigHash).NotTo(Equal("")) + + newInternalEndpoint := "https://keystone-internal" + + keystone.UpdateKeystoneAPIEndpoint(keystoneAPIName, "internal", newInternalEndpoint) + logger.Info("Reconfigured") + + Eventually(func(g Gomega) { + deployment := th.GetDeployment(names.DeploymentName) + newConfigHash := GetEnvVarValue(deployment.Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "") + g.Expect(newConfigHash).NotTo(Equal(oldConfigHash)) + }, timeout, interval).Should(Succeed()) + + cm := th.GetSecret(names.ConfigMapName) + conf := cm.Data["placement.conf"] + Expect(conf).Should( + ContainSubstring("auth_url = %s", newInternalEndpoint)) + }) + }) + + When("A PlacementAPI is created with TLS", func() { + BeforeEach(func() { + DeferCleanup(k8sClient.Delete, ctx, th.CreateCABundleSecret(names.CaBundleSecretName)) + DeferCleanup(k8sClient.Delete, ctx, th.CreateCertSecret(names.InternalCertSecretName)) + DeferCleanup(k8sClient.Delete, ctx, th.CreateCertSecret(names.PublicCertSecretName)) + + spec := GetTLSPlacementAPISpec(names) + placement := CreatePlacementAPI(names.PlacementAPIName, spec) + DeferCleanup(th.DeleteInstance, placement) + + DeferCleanup(keystone.DeleteKeystoneAPI, keystone.CreateKeystoneAPI(namespace)) + DeferCleanup(k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, 
SecretName)) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBTLSDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + DeferCleanup(th.DeleteInstance, placement) + }) + + It("it creates deployment with CA and service certs mounted", func() { + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.TLSInputReadyCondition, + corev1.ConditionTrue, + ) + + j := th.GetDeployment(names.DeploymentName) + + container := j.Spec.Template.Spec.Containers[0] + + // CA bundle + th.AssertVolumeExists(names.CaBundleSecretName.Name, j.Spec.Template.Spec.Volumes) + th.AssertVolumeMountExists(names.CaBundleSecretName.Name, "tls-ca-bundle.pem", j.Spec.Template.Spec.Containers[0].VolumeMounts) + + // service certs + th.AssertVolumeExists(names.InternalCertSecretName.Name, j.Spec.Template.Spec.Volumes) + th.AssertVolumeExists(names.PublicCertSecretName.Name, j.Spec.Template.Spec.Volumes) + th.AssertVolumeMountExists(names.PublicCertSecretName.Name, "tls.key", j.Spec.Template.Spec.Containers[0].VolumeMounts) + th.AssertVolumeMountExists(names.PublicCertSecretName.Name, "tls.crt", j.Spec.Template.Spec.Containers[0].VolumeMounts) + th.AssertVolumeMountExists(names.InternalCertSecretName.Name, "tls.key", j.Spec.Template.Spec.Containers[0].VolumeMounts) + th.AssertVolumeMountExists(names.InternalCertSecretName.Name, "tls.crt", j.Spec.Template.Spec.Containers[0].VolumeMounts) + + Expect(container.ReadinessProbe.HTTPGet.Scheme).To(Equal(corev1.URISchemeHTTPS)) + 
Expect(container.LivenessProbe.HTTPGet.Scheme).To(Equal(corev1.URISchemeHTTPS)) + + configDataMap := th.GetSecret(names.ConfigMapName) + Expect(configDataMap).ShouldNot(BeNil()) + Expect(configDataMap.Data).Should(HaveKey("httpd.conf")) + Expect(configDataMap.Data).Should(HaveKey("ssl.conf")) + configData := string(configDataMap.Data["httpd.conf"]) + Expect(configData).Should(ContainSubstring("SSLEngine on")) + Expect(configData).Should(ContainSubstring("SSLCertificateFile \"/etc/pki/tls/certs/internal.crt\"")) + Expect(configData).Should(ContainSubstring("SSLCertificateKeyFile \"/etc/pki/tls/private/internal.key\"")) + Expect(configData).Should(ContainSubstring("SSLCertificateFile \"/etc/pki/tls/certs/public.crt\"")) + Expect(configData).Should(ContainSubstring("SSLCertificateKeyFile \"/etc/pki/tls/private/public.key\"")) + + configData = string(configDataMap.Data["my.cnf"]) + Expect(configData).To( + ContainSubstring("[client]\nssl-ca=/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem\nssl=1")) + }) + }) + + When("A PlacementAPI is created with a wrong topologyref", func() { + BeforeEach(func() { + spec := GetDefaultPlacementAPISpec() + spec["topologyRef"] = map[string]interface{}{ + "name": "foo", + } + placement := CreatePlacementAPI(names.PlacementAPIName, spec) + DeferCleanup(th.DeleteInstance, placement) + }) + + It("points to a non existing topology CR", func() { + // Reconciliation does not succeed because TopologyReadyCondition + // is not marked as True + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ReadyCondition, + corev1.ConditionFalse, + ) + // TopologyReadyCondition is Unknown as it waits for the Topology + // CR to be available + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.TopologyReadyCondition, + corev1.ConditionUnknown, + ) + }) + }) + When("A PlacementAPI is created with topologyref", func() { + var topologyRef, 
topologyRefAlt *topologyv1.TopoRef + BeforeEach(func() { + // Define the two topology references used in this test + topologyRef = &topologyv1.TopoRef{ + Name: names.PlacementAPITopologies[0].Name, + Namespace: names.PlacementAPITopologies[0].Namespace, + } + topologyRefAlt = &topologyv1.TopoRef{ + Name: names.PlacementAPITopologies[1].Name, + Namespace: names.PlacementAPITopologies[1].Namespace, + } + // Create Test Topologies + for _, t := range names.PlacementAPITopologies { + // Build the topology Spec + topologySpec, _ := GetSampleTopologySpec(t.Name) + infra.CreateTopology(t, topologySpec) + } + spec := GetDefaultPlacementAPISpec() + spec["topologyRef"] = map[string]interface{}{ + "name": topologyRef.Name, + } + placement := CreatePlacementAPI(names.PlacementAPIName, spec) + DeferCleanup(th.DeleteInstance, placement) + + DeferCleanup(keystone.DeleteKeystoneAPI, keystone.CreateKeystoneAPI(namespace)) + DeferCleanup(k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + DeferCleanup(th.DeleteInstance, placement) + }) + + It("sets topology in CR status", func() { + Eventually(func(g Gomega) { + tp := infra.GetTopology(types.NamespacedName{ + Name: topologyRef.Name, + Namespace: topologyRef.Namespace, + }) + finalizers := tp.GetFinalizers() + g.Expect(finalizers).To(HaveLen(1)) + placement := GetPlacementAPI(names.PlacementAPIName) + 
g.Expect(placement.Status.LastAppliedTopology).ToNot(BeNil()) + g.Expect(placement.Status.LastAppliedTopology).To(Equal(topologyRef)) + g.Expect(finalizers).To(ContainElement( + fmt.Sprintf("openstack.org/placementapi-%s", names.PlacementAPIName.Name))) + }, timeout, interval).Should(Succeed()) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.TopologyReadyCondition, + corev1.ConditionTrue, + ) + }) + + It("sets topology in resource specs", func() { + Eventually(func(g Gomega) { + _, topologySpecObj := GetSampleTopologySpec(topologyRef.Name) + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.Affinity).To(BeNil()) + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.TopologySpreadConstraints).ToNot(BeNil()) + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.TopologySpreadConstraints).To(Equal(topologySpecObj)) + }, timeout, interval).Should(Succeed()) + }) + It("updates topology when the reference changes", func() { + Eventually(func(g Gomega) { + placement := GetPlacementAPI(names.PlacementAPIName) + placement.Spec.TopologyRef.Name = topologyRefAlt.Name + g.Expect(k8sClient.Update(ctx, placement)).To(Succeed()) + }, timeout, interval).Should(Succeed()) + + Eventually(func(g Gomega) { + tp := infra.GetTopology(types.NamespacedName{ + Name: topologyRefAlt.Name, + Namespace: topologyRefAlt.Namespace, + }) + finalizers := tp.GetFinalizers() + g.Expect(finalizers).To(HaveLen(1)) + placement := GetPlacementAPI(names.PlacementAPIName) + g.Expect(placement.Status.LastAppliedTopology).ToNot(BeNil()) + g.Expect(placement.Status.LastAppliedTopology).To(Equal(topologyRefAlt)) + g.Expect(finalizers).To(ContainElement( + fmt.Sprintf("openstack.org/placementapi-%s", names.PlacementAPIName.Name))) + // Verify the previous referenced topology has no finalizers + tp = infra.GetTopology(types.NamespacedName{ + Name: topologyRef.Name, + Namespace: topologyRef.Namespace, + 
}) + finalizers = tp.GetFinalizers() + g.Expect(finalizers).To(BeEmpty()) + }, timeout, interval).Should(Succeed()) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.TopologyReadyCondition, + corev1.ConditionTrue, + ) + }) + It("removes topologyRef from the spec", func() { + Eventually(func(g Gomega) { + placement := GetPlacementAPI(names.PlacementAPIName) + // Remove the TopologyRef from the existing Placement .Spec + placement.Spec.TopologyRef = nil + g.Expect(k8sClient.Update(ctx, placement)).To(Succeed()) + }, timeout, interval).Should(Succeed()) + + Eventually(func(g Gomega) { + placement := GetPlacementAPI(names.PlacementAPIName) + g.Expect(placement.Status.LastAppliedTopology).Should(BeNil()) + }, timeout, interval).Should(Succeed()) + + Eventually(func(g Gomega) { + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.TopologySpreadConstraints).To(BeNil()) + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.Affinity).ToNot(BeNil()) + }, timeout, interval).Should(Succeed()) + + // Verify the existing topologies have no finalizer anymore + Eventually(func(g Gomega) { + for _, topology := range names.PlacementAPITopologies { + tp := infra.GetTopology(types.NamespacedName{ + Name: topology.Name, + Namespace: topology.Namespace, + }) + finalizers := tp.GetFinalizers() + g.Expect(finalizers).To(BeEmpty()) + } + }, timeout, interval).Should(Succeed()) + }) + }) + + When("A PlacementAPI is created with nodeSelector", func() { + BeforeEach(func() { + spec := GetDefaultPlacementAPISpec() + spec["nodeSelector"] = map[string]interface{}{ + "foo": "bar", + } + + placement := CreatePlacementAPI(names.PlacementAPIName, spec) + DeferCleanup(th.DeleteInstance, placement) + + DeferCleanup(keystone.DeleteKeystoneAPI, keystone.CreateKeystoneAPI(namespace)) + DeferCleanup(k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + + serviceSpec := corev1.ServiceSpec{Ports: 
[]corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + DeferCleanup(th.DeleteInstance, placement) + }) + + It("sets nodeSelector in resource specs", func() { + Eventually(func(g Gomega) { + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) + g.Expect(th.GetJob(names.DBSyncJobName).Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) + }, timeout, interval).Should(Succeed()) + }) + + It("updates nodeSelector in resource specs when changed", func() { + Eventually(func(g Gomega) { + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) + g.Expect(th.GetJob(names.DBSyncJobName).Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) + }, timeout, interval).Should(Succeed()) + + Eventually(func(g Gomega) { + placement := GetPlacementAPI(names.PlacementAPIName) + newNodeSelector := map[string]string{ + "foo2": "bar2", + } + placement.Spec.NodeSelector = &newNodeSelector + g.Expect(k8sClient.Update(ctx, placement)).Should(Succeed()) + }, timeout, interval).Should(Succeed()) + + Eventually(func(g Gomega) { + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo2": "bar2"})) + 
g.Expect(th.GetJob(names.DBSyncJobName).Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo2": "bar2"})) + }, timeout, interval).Should(Succeed()) + }) + + It("removes nodeSelector from resource specs when cleared", func() { + Eventually(func(g Gomega) { + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) + g.Expect(th.GetJob(names.DBSyncJobName).Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) + }, timeout, interval).Should(Succeed()) + + Eventually(func(g Gomega) { + placement := GetPlacementAPI(names.PlacementAPIName) + emptyNodeSelector := map[string]string{} + placement.Spec.NodeSelector = &emptyNodeSelector + g.Expect(k8sClient.Update(ctx, placement)).Should(Succeed()) + }, timeout, interval).Should(Succeed()) + + Eventually(func(g Gomega) { + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.NodeSelector).To(BeNil()) + g.Expect(th.GetJob(names.DBSyncJobName).Spec.Template.Spec.NodeSelector).To(BeNil()) + }, timeout, interval).Should(Succeed()) + }) + + It("removes nodeSelector from resource specs when nilled", func() { + Eventually(func(g Gomega) { + g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) + g.Expect(th.GetJob(names.DBSyncJobName).Spec.Template.Spec.NodeSelector).To(Equal(map[string]string{"foo": "bar"})) + }, timeout, interval).Should(Succeed()) + + Eventually(func(g Gomega) { + placement := GetPlacementAPI(names.PlacementAPIName) + placement.Spec.NodeSelector = nil + g.Expect(k8sClient.Update(ctx, placement)).Should(Succeed()) + }, timeout, interval).Should(Succeed()) + + Eventually(func(g Gomega) { + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + 
g.Expect(th.GetDeployment(names.DeploymentName).Spec.Template.Spec.NodeSelector).To(BeNil()) + g.Expect(th.GetJob(names.DBSyncJobName).Spec.Template.Spec.NodeSelector).To(BeNil()) + }, timeout, interval).Should(Succeed()) + }) + }) + // Run MariaDBAccount suite tests. these are pre-packaged ginkgo tests + // that exercise standard account create / update patterns that should be + // common to all controllers that ensure MariaDBAccount CRs. + + mariadbSuite := &mariadb_test.MariaDBTestHarness{ + PopulateHarness: func(harness *mariadb_test.MariaDBTestHarness) { + harness.Setup( + "Placement", + names.PlacementAPIName.Namespace, + placement.DatabaseName, + "openstack.org/placementapi", + mariadb, timeout, interval, + ) + }, + + // Generate a fully running Keystone service given an accountName + // needs to make it all the way to the end where the mariadb finalizers + // are removed from unused accounts since that's part of what we are testing + SetupCR: func(accountName types.NamespacedName) { + DeferCleanup(k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + keystoneAPIName := keystone.CreateKeystoneAPI(namespace) + DeferCleanup(keystone.DeleteKeystoneAPI, keystoneAPIName) + + spec := GetDefaultPlacementAPISpec() + spec["databaseAccount"] = accountName.Name + DeferCleanup( + th.DeleteInstance, + CreatePlacementAPI(names.PlacementAPIName, spec), + ) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + db := mariadb.GetMariaDBDatabase(names.MariaDBDatabaseName) + Expect(db.Spec.Name).To(Equal(names.MariaDBDatabaseName.Name)) + + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(accountName) + + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + 
keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ReadyCondition, + corev1.ConditionTrue, + ) + }, + // Change the account name in the service to a new name + UpdateAccount: func(newAccountName types.NamespacedName) { + + Eventually(func(g Gomega) { + placementapi := GetPlacementAPI(names.PlacementAPIName) + placementapi.Spec.DatabaseAccount = newAccountName.Name + g.Expect(th.K8sClient.Update(ctx, placementapi)).Should(Succeed()) + }, timeout, interval).Should(Succeed()) + + }, + SwitchToNewAccount: func() { + th.SimulateJobSuccess(names.DBSyncJobName) + + Eventually(func(g Gomega) { + th.SimulateDeploymentReplicaReady(names.DeploymentName) + placementapi := GetPlacementAPI(names.PlacementAPIName) + g.Expect(placementapi.Status.Conditions.Get(condition.DeploymentReadyCondition).Status).To( + Equal(corev1.ConditionTrue)) + + }, timeout, interval).Should(Succeed()) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ReadyCondition, + corev1.ConditionTrue, + ) + }, + // delete the CR instance to exercise finalizer removal + DeleteCR: func() { + th.DeleteInstance(GetPlacementAPI(names.PlacementAPIName)) + }, + } + + mariadbSuite.RunBasicSuite() + + mariadbSuite.RunURLAssertSuite(func(_ types.NamespacedName, username string, password string) { + Eventually(func(g Gomega) { + cm := th.GetSecret(names.ConfigMapName) + + conf := cm.Data["placement.conf"] + + g.Expect(string(conf)).Should( + ContainSubstring(fmt.Sprintf("connection = mysql+pymysql://%s:%s@hostname-for-openstack.%s.svc/placement?read_default_file=/etc/my.cnf", + username, password, namespace))) + }, timeout, 
interval).Should(Succeed()) + + }) + + mariadbSuite.RunConfigHashSuite(func() string { + deployment := th.GetDeployment(names.DeploymentName) + return GetEnvVarValue(deployment.Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "") + }) + +}) + +var _ = Describe("PlacementAPI reconfiguration", func() { + BeforeEach(func() { + err := os.Setenv("OPERATOR_TEMPLATES", "../../../templates") + Expect(err).NotTo(HaveOccurred()) + }) + + When("TLS certs are reconfigured", func() { + BeforeEach(func() { + + DeferCleanup(k8sClient.Delete, ctx, th.CreateCABundleSecret(names.CaBundleSecretName)) + DeferCleanup(k8sClient.Delete, ctx, th.CreateCertSecret(names.InternalCertSecretName)) + DeferCleanup(k8sClient.Delete, ctx, th.CreateCertSecret(names.PublicCertSecretName)) + DeferCleanup(th.DeleteInstance, CreatePlacementAPI(names.PlacementAPIName, GetTLSPlacementAPISpec(names))) + DeferCleanup(keystone.DeleteKeystoneAPI, keystone.CreateKeystoneAPI(namespace)) + DeferCleanup(k8sClient.Delete, ctx, CreatePlacementAPISecret(namespace, SecretName)) + + spec := GetTLSPlacementAPISpec(names) + placement := CreatePlacementAPI(names.PlacementAPIName, spec) + + serviceSpec := corev1.ServiceSpec{Ports: []corev1.ServicePort{{Port: 3306}}} + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService(namespace, "openstack", serviceSpec), + ) + mariadb.SimulateMariaDBTLSDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + + th.SimulateJobSuccess(names.DBSyncJobName) + DeferCleanup(th.DeleteInstance, placement) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ReadyCondition, + corev1.ConditionTrue, + ) + }) + + It("reconfigures the API pod", func() { + th.ExpectCondition( + 
names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.TLSInputReadyCondition, + corev1.ConditionTrue, + ) + + // Grab the current config hash + originalHash := GetEnvVarValue( + th.GetDeployment(names.DeploymentName).Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "") + Expect(originalHash).NotTo(BeEmpty()) + + // Change the content of the CA secret + th.UpdateSecret(names.CaBundleSecretName, "tls-ca-bundle.pem", []byte("DifferentCAData")) + // Assert that the deployment is updated + Eventually(func(g Gomega) { + newHash := GetEnvVarValue( + th.GetDeployment(names.DeploymentName).Spec.Template.Spec.Containers[0].Env, "CONFIG_HASH", "") + g.Expect(newHash).NotTo(BeEmpty()) + g.Expect(newHash).NotTo(Equal(originalHash)) + }, timeout, interval).Should(Succeed()) + }) + + }) + +}) diff --git a/test/functional/placement/placementapi_webhook_test.go b/test/functional/placement/placementapi_webhook_test.go new file mode 100644 index 000000000..2a1d8e3fc --- /dev/null +++ b/test/functional/placement/placementapi_webhook_test.go @@ -0,0 +1,219 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package functional_test + +import ( + "errors" + "os" + + . "github.com/onsi/ginkgo/v2" //revive:disable:dot-imports + . "github.com/onsi/gomega" //revive:disable:dot-imports + + //revive:disable-next-line:dot-imports + . 
"github.com/openstack-k8s-operators/lib-common/modules/common/test/helpers" + + corev1 "k8s.io/api/core/v1" + k8s_errors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + + condition "github.com/openstack-k8s-operators/lib-common/modules/common/condition" + "github.com/openstack-k8s-operators/lib-common/modules/common/service" + placementv1 "github.com/openstack-k8s-operators/nova-operator/apis/placement/v1beta1" +) + +var _ = Describe("PlacementAPI Webhook", func() { + + var placementAPIName types.NamespacedName + + BeforeEach(func() { + + placementAPIName = types.NamespacedName{ + Name: "placement", + Namespace: namespace, + } + + err := os.Setenv("OPERATOR_TEMPLATES", "../../../templates") + Expect(err).NotTo(HaveOccurred()) + }) + + When("A PlacementAPI instance is created without container images", func() { + BeforeEach(func() { + DeferCleanup(th.DeleteInstance, CreatePlacementAPI(placementAPIName, GetDefaultPlacementAPISpec())) + }) + + It("should have the defaults initialized by webhook", func() { + PlacementAPI := GetPlacementAPI(placementAPIName) + Expect(PlacementAPI.Spec.ContainerImage).Should(Equal( + placementv1.PlacementAPIContainerImage, + )) + }) + }) + + When("A PlacementAPI instance is created with container images", func() { + BeforeEach(func() { + placementAPISpec := GetDefaultPlacementAPISpec() + placementAPISpec["containerImage"] = "api-container-image" + DeferCleanup(th.DeleteInstance, CreatePlacementAPI(placementAPIName, placementAPISpec)) + }) + + It("should use the given values", func() { + PlacementAPI := GetPlacementAPI(placementAPIName) + Expect(PlacementAPI.Spec.ContainerImage).Should(Equal( + "api-container-image", + )) + }) + }) + + It("rejects PlacementAPI with wrong defaultConfigOverwrite", func() { + spec := GetDefaultPlacementAPISpec() + spec["defaultConfigOverwrite"] = 
map[string]interface{}{ + "policy.yaml": "support", + "api-paste.ini": "not supported", + } + raw := map[string]interface{}{ + "apiVersion": "placement.openstack.org/v1beta1", + "kind": "PlacementAPI", + "metadata": map[string]interface{}{ + "name": placementAPIName.Name, + "namespace": placementAPIName.Namespace, + }, + "spec": spec, + } + unstructuredObj := &unstructured.Unstructured{Object: raw} + _, err := controllerutil.CreateOrPatch( + ctx, k8sClient, unstructuredObj, func() error { return nil }) + + Expect(err).Should(HaveOccurred()) + var statusError *k8s_errors.StatusError + Expect(errors.As(err, &statusError)).To(BeTrue()) + Expect(statusError.ErrStatus.Details.Kind).To(Equal("PlacementAPI")) + Expect(statusError.ErrStatus.Message).To( + ContainSubstring( + "invalid: spec.defaultConfigOverwrite: " + + "Invalid value: \"api-paste.ini\": " + + "Only the following keys are valid: policy.yaml", + ), + ) + }) + + It("rejects with wrong service override endpoint type", func() { + spec := GetDefaultPlacementAPISpec() + spec["override"] = map[string]interface{}{ + "service": map[string]interface{}{ + "internal": map[string]interface{}{}, + "wrooong": map[string]interface{}{}, + }, + } + + raw := map[string]interface{}{ + "apiVersion": "placement.openstack.org/v1beta1", + "kind": "PlacementAPI", + "metadata": map[string]interface{}{ + "name": placementAPIName.Name, + "namespace": placementAPIName.Namespace, + }, + "spec": spec, + } + + unstructuredObj := &unstructured.Unstructured{Object: raw} + _, err := controllerutil.CreateOrPatch( + th.Ctx, th.K8sClient, unstructuredObj, func() error { return nil }) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring( + "invalid: spec.override.service[wrooong]: " + + "Invalid value: \"wrooong\": invalid endpoint type: wrooong"), + ) + }) + + When("A PlacementAPI instance is updated with wrong service override endpoint", func() { + BeforeEach(func() { + DeferCleanup(k8sClient.Delete, ctx, 
CreatePlacementAPISecret(namespace, SecretName)) + DeferCleanup(keystone.DeleteKeystoneAPI, keystone.CreateKeystoneAPI(namespace)) + + placementAPI := CreatePlacementAPI(names.PlacementAPIName, GetDefaultPlacementAPISpec()) + DeferCleanup( + mariadb.DeleteDBService, + mariadb.CreateDBService( + namespace, + GetPlacementAPI(names.PlacementAPIName).Spec.DatabaseInstance, + corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{Port: 3306}}, + }, + ), + ) + + mariadb.SimulateMariaDBDatabaseCompleted(names.MariaDBDatabaseName) + mariadb.SimulateMariaDBAccountCompleted(names.MariaDBAccount) + th.SimulateJobSuccess(names.DBSyncJobName) + th.SimulateDeploymentReplicaReady(names.DeploymentName) + keystone.SimulateKeystoneServiceReady(names.KeystoneServiceName) + keystone.SimulateKeystoneEndpointReady(names.KeystoneEndpointName) + DeferCleanup(th.DeleteInstance, placementAPI) + + th.ExpectCondition( + names.PlacementAPIName, + ConditionGetterFunc(PlacementConditionGetter), + condition.ReadyCondition, + corev1.ConditionTrue, + ) + }) + It("rejects update with wrong service override endpoint type", func() { + PlacementAPI := GetPlacementAPI(names.PlacementAPIName) + Expect(PlacementAPI).NotTo(BeNil()) + if PlacementAPI.Spec.Override.Service == nil { + PlacementAPI.Spec.Override.Service = map[service.Endpoint]service.RoutedOverrideSpec{} + } + PlacementAPI.Spec.Override.Service["wrooong"] = service.RoutedOverrideSpec{} + err := k8sClient.Update(ctx, PlacementAPI) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring( + "invalid: spec.override.service[wrooong]: " + + "Invalid value: \"wrooong\": invalid endpoint type: wrooong"), + ) + }) + }) + + It("rejects a wrong TopologyRef on a different namespace", func() { + spec := GetDefaultPlacementAPISpec() + // Inject a topologyRef that points to a different namespace + spec["topologyRef"] = map[string]interface{}{ + "name": "foo", + "namespace": "bar", + } + raw := map[string]interface{}{ + "apiVersion": 
"placement.openstack.org/v1beta1", + "kind": "PlacementAPI", + "metadata": map[string]interface{}{ + "name": placementAPIName.Name, + "namespace": placementAPIName.Namespace, + }, + "spec": spec, + } + unstructuredObj := &unstructured.Unstructured{Object: raw} + _, err := controllerutil.CreateOrPatch( + th.Ctx, th.K8sClient, unstructuredObj, func() error { return nil }) + Expect(err).To(HaveOccurred()) + Expect(err.Error()).To( + ContainSubstring( + "spec.topologyRef.namespace: Invalid value: \"namespace\": Customizing namespace field is not supported"), + ) + }) +}) diff --git a/test/functional/placement/suite_test.go b/test/functional/placement/suite_test.go new file mode 100644 index 000000000..ca58dd09b --- /dev/null +++ b/test/functional/placement/suite_test.go @@ -0,0 +1,240 @@ +/* +Copyright 2022. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package functional_test + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "path/filepath" + "testing" + "time" + + "github.com/go-logr/logr" + "github.com/google/uuid" + . "github.com/onsi/ginkgo/v2" //revive:disable:dot-imports + . 
"github.com/onsi/gomega" //revive:disable:dot-imports + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + "sigs.k8s.io/controller-runtime/pkg/webhook" + + infra_test "github.com/openstack-k8s-operators/infra-operator/apis/test/helpers" + topologyv1 "github.com/openstack-k8s-operators/infra-operator/apis/topology/v1beta1" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + keystonev1 "github.com/openstack-k8s-operators/keystone-operator/api/v1beta1" + test "github.com/openstack-k8s-operators/lib-common/modules/test" + mariadbv1 "github.com/openstack-k8s-operators/mariadb-operator/api/v1beta1" + placementv1 "github.com/openstack-k8s-operators/nova-operator/apis/placement/v1beta1" + placement_ctrl "github.com/openstack-k8s-operators/nova-operator/controllers/placement" + + keystone_test "github.com/openstack-k8s-operators/keystone-operator/api/test/helpers" + common_test "github.com/openstack-k8s-operators/lib-common/modules/common/test/helpers" + mariadb_test "github.com/openstack-k8s-operators/mariadb-operator/api/test/helpers" + //+kubebuilder:scaffold:imports +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + cfg *rest.Config + k8sClient client.Client // You'll be using this client in your tests. 
+ testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc + logger logr.Logger + th *common_test.TestHelper + keystone *keystone_test.TestHelper + mariadb *mariadb_test.TestHelper + infra *infra_test.TestHelper + namespace string + names Names +) + +const ( + timeout = time.Second * 10 + + SecretName = "test-osp-secret" + + AccountName = "test-placement-account" + + PublicCertSecretName = "public-tls-certs" // #nosec G101 + + InternalCertSecretName = "internal-tls-certs" // #nosec G101 + + CABundleSecretName = "combined-ca-bundle" // #nosec G101 + + interval = time.Millisecond * 200 +) + +func TestAPIs(t *testing.T) { + RegisterFailHandler(Fail) + + RunSpecs(t, "Controller Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) + + ctx, cancel = context.WithCancel(context.TODO()) + + keystoneCRDs, err := test.GetCRDDirFromModule( + "github.com/openstack-k8s-operators/keystone-operator/api", "../../../go.mod", "bases") + Expect(err).ShouldNot(HaveOccurred()) + mariaDBCRDs, err := test.GetCRDDirFromModule( + "github.com/openstack-k8s-operators/mariadb-operator/api", "../../../go.mod", "bases") + Expect(err).ShouldNot(HaveOccurred()) + topologyCRDs, err := test.GetCRDDirFromModule( + "github.com/openstack-k8s-operators/infra-operator/apis", "../../../go.mod", "bases") + Expect(err).ShouldNot(HaveOccurred()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{ + filepath.Join("..", "..", "..", "config", "crd", "bases"), + // NOTE(gibi): we need to list all the external CRDs our operator depends on + keystoneCRDs, + mariaDBCRDs, + topologyCRDs, + }, + ErrorIfCRDPathMissing: true, + WebhookInstallOptions: envtest.WebhookInstallOptions{ + Paths: []string{filepath.Join("..", "..", "..", "config", "webhook")}, + // NOTE(gibi): if localhost is resolved to ::1 (ipv6) then starting + // the webhook fails as it try to parse the address as 
ipv4 and + // failing on the colons in ::1 + LocalServingHost: "127.0.0.1", + }, + } + + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + // NOTE(gibi): Need to add all API schemas our operator can own. + // Keep this in synch with PlacementAPIReconciler.SetupWithManager, + // otherwise the reconciler loop will silently not start + // in the test env. + err = placementv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = mariadbv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = keystonev1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + err = topologyv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + //+kubebuilder:scaffold:scheme + + logger = ctrl.Log.WithName("---Test---") + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + th = common_test.NewTestHelper(ctx, k8sClient, timeout, interval, logger) + Expect(th).NotTo(BeNil()) + keystone = keystone_test.NewTestHelper(ctx, k8sClient, timeout, interval, logger) + Expect(keystone).NotTo(BeNil()) + mariadb = mariadb_test.NewTestHelper(ctx, k8sClient, timeout, interval, logger) + Expect(mariadb).NotTo(BeNil()) + infra = infra_test.NewTestHelper(ctx, k8sClient, timeout, interval, logger) + Expect(infra).NotTo(BeNil()) + + // Start the controller-manager if goroutine + webhookInstallOptions := &testEnv.WebhookInstallOptions + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + Metrics: metricsserver.Options{ + BindAddress: "0", + }, + WebhookServer: webhook.NewServer( + webhook.Options{ + Host: webhookInstallOptions.LocalServingHost, + Port: webhookInstallOptions.LocalServingPort, + CertDir: webhookInstallOptions.LocalServingCertDir, + }), + LeaderElection: false, + }) + Expect(err).ToNot(HaveOccurred()) + + kclient, err := 
kubernetes.NewForConfig(cfg) + Expect(err).ToNot(HaveOccurred(), "failed to create kclient") + + err = (&placementv1.PlacementAPI{}).SetupWebhookWithManager(k8sManager) + Expect(err).NotTo(HaveOccurred()) + + placementv1.SetupDefaults() + + err = (&placement_ctrl.PlacementAPIReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + Kclient: kclient, + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() + + // wait for the webhook server to get ready + dialer := &net.Dialer{Timeout: time.Duration(10) * time.Second} + addrPort := fmt.Sprintf("%s:%d", webhookInstallOptions.LocalServingHost, webhookInstallOptions.LocalServingPort) + Eventually(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addrPort, &tls.Config{InsecureSkipVerify: true}) // #nosec G402 + if err != nil { + return err + } + conn.Close() + return nil + }).Should(Succeed()) +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) + +var _ = BeforeEach(func() { + // NOTE(gibi): We need to create a unique namespace for each test run + // as namespaces cannot be deleted in a locally running envtest. See + // https://book.kubebuilder.io/reference/envtest.html#namespace-usage-limitation + namespace = uuid.New().String() + th.CreateNamespace(namespace) + // We still request the delete of the Namespace to properly cleanup if + // we run the test in an existing cluster. 
+ DeferCleanup(th.DeleteNamespace, namespace) + + placementAPIName := types.NamespacedName{ + Namespace: namespace, + Name: uuid.New().String()[:25], + } + + names = CreateNames(placementAPIName) +}) diff --git a/test/kuttl/placement/common/assert_sample_deployment.yaml b/test/kuttl/placement/common/assert_sample_deployment.yaml new file mode 100644 index 000000000..87b847ec1 --- /dev/null +++ b/test/kuttl/placement/common/assert_sample_deployment.yaml @@ -0,0 +1,287 @@ +apiVersion: placement.openstack.org/v1beta1 +kind: PlacementAPI +metadata: + finalizers: + - openstack.org/placementapi + name: placement +spec: + customServiceConfig: | + [DEFAULT] + debug = true + databaseInstance: openstack + databaseAccount: placement + passwordSelectors: + service: PlacementPassword + preserveJobs: false + replicas: 1 + secret: osp-secret + serviceUser: placement +status: + databaseHostname: openstack.placement-kuttl-tests.svc + readyCount: 1 + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Create service completed + reason: Ready + status: "True" + type: CreateServiceReady + - message: DB create completed + reason: Ready + status: "True" + type: DBReady + - message: DBsync completed + reason: Ready + status: "True" + type: DBSyncReady + - message: Deployment completed + reason: Ready + status: "True" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: Setup complete + reason: Ready + status: "True" + type: KeystoneEndpointReady + - message: Setup complete + reason: Ready + status: "True" + type: KeystoneServiceReady + - message: MariaDBAccount creation complete + reason: Ready + status: "True" + type: MariaDBAccountReady + - message: NetworkAttachments completed + reason: Ready + status: "True" + type: NetworkAttachmentsReady + - message: RoleBinding created + reason: Ready + status: "True" + type: RoleBindingReady + - message: Role created + reason: 
Ready + status: "True" + type: RoleReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Service config create completed + reason: Ready + status: "True" + type: ServiceConfigReady + - message: Input data complete + reason: Ready + status: "True" + type: TLSInputReady +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: placement +spec: + replicas: 1 + template: + metadata: + labels: + service: placement + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: service + operator: In + values: + - placement + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - args: + - --single-child + - -- + - /usr/bin/tail + - -n+1 + - -F + - /var/log/placement/placement-api.log + command: + - /usr/bin/dumb-init + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: / + port: 8778 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 30 + name: placement-log + readinessProbe: + failureThreshold: 3 + httpGet: + path: / + port: 8778 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 30 + resources: {} + securityContext: + runAsUser: 42482 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /usr/local/bin/container-scripts + name: scripts + readOnly: true + - mountPath: /var/log/placement + name: logs + - mountPath: /var/lib/openstack/config + name: config-data + - mountPath: /var/lib/kolla/config_files/config.json + name: config-data + readOnly: true + subPath: placement-api-config.json + - args: + - -c + - /usr/local/bin/kolla_start + command: + - /bin/bash + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: / + port: 8778 + scheme: HTTP + initialDelaySeconds: 5 + 
periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 30 + name: placement-api + readinessProbe: + failureThreshold: 3 + httpGet: + path: / + port: 8778 + scheme: HTTP + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 30 + resources: {} + restartPolicy: Always + securityContext: {} + serviceAccount: placement-placement + serviceAccountName: placement-placement +status: + availableReplicas: 1 + replicas: 1 +--- +# the openshift annotations can't be checked through the deployment above +apiVersion: v1 +kind: Pod +metadata: + annotations: + openshift.io/scc: anyuid + labels: + service: placement +status: + phase: Running +--- +apiVersion: v1 +kind: Service +metadata: + labels: + endpoint: internal + service: placement + name: placement-internal +spec: + ports: + - name: placement-internal + selector: + service: placement + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + endpoint: public + service: placement + name: placement-public +spec: + ports: + - name: placement-public + selector: + service: placement + type: ClusterIP +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + placement.openstack.org/name: placement + name: placement-config-data + ownerReferences: + - blockOwnerDeletion: true + controller: true + kind: PlacementAPI + name: placement +--- +apiVersion: keystone.openstack.org/v1beta1 +kind: KeystoneEndpoint +metadata: + name: placement + ownerReferences: + - apiVersion: placement.openstack.org/v1beta1 + blockOwnerDeletion: true + controller: true + kind: PlacementAPI + name: placement +--- +# the actual addresses of the api endpoints are platform specific, so we can't rely on +# kuttl asserts to check them. This short script gathers the addresses and checks that +# the two endpoints are defined and their addresses follow the default pattern +apiVersion: kuttl.dev/v1beta1 +kind: TestAssert +namespaced: true +commands: + - script: | + . 
$PLACEMENT_KUTTL_DIR/../lib/helper_functions.sh + apiEndpoints=$(oc get -n $NAMESPACE KeystoneEndpoint placement -o go-template-file=$PLACEMENT_KUTTL_DIR/../go_templates/apiEndpoints.gotemplate) + assert_regex $apiEndpoints 'http:\/\/placement-internal\..+\.svc.*' + assert_regex $apiEndpoints 'http:\/\/placement-public\..+\.svc.*' + + # when using image digests the containerImage URLs are SHA's so we verify them with a script + tupleTemplate='{{ range (index .spec.template.spec.containers 1).env }}{{ .name }}{{ "#" }}{{ .value}}{{"\n"}}{{ end }}' + imageTuples=$(oc get -n openstack-operators deployment placement-operator-controller-manager -o go-template="$tupleTemplate") + # format of imageTuple is: RELATED_IMAGE_PLACEMENT_# separated by newlines + for ITEM in $(echo $imageTuples); do + # it is an image + if echo $ITEM | grep 'RELATED_IMAGE' &> /dev/null; then + NAME=$(echo $ITEM | sed -e 's|^RELATED_IMAGE_PLACEMENT_\([^_]*\)_.*|\1|') + IMG_FROM_ENV=$(echo $ITEM | sed -e 's|^.*#\(.*\)|\1|') + template='{{.spec.containerImage}}' + case $NAME in + API) + SERVICE_IMAGE=$(oc get -n $NAMESPACE placementapi placement -o go-template="$template") + ;; + esac + if [ "$SERVICE_IMAGE" != "$IMG_FROM_ENV" ]; then + echo "$NAME image does not equal $VALUE" + exit 1 + fi + fi + done diff --git a/test/kuttl/placement/common/cleanup-placement.yaml b/test/kuttl/placement/common/cleanup-placement.yaml new file mode 100644 index 000000000..06857f7da --- /dev/null +++ b/test/kuttl/placement/common/cleanup-placement.yaml @@ -0,0 +1,6 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +delete: +- apiVersion: placement.openstack.org/v1beta1 + kind: PlacementAPI + name: placement diff --git a/test/kuttl/placement/common/errors_cleanup_placement.yaml b/test/kuttl/placement/common/errors_cleanup_placement.yaml new file mode 100644 index 000000000..4e0a769b6 --- /dev/null +++ b/test/kuttl/placement/common/errors_cleanup_placement.yaml @@ -0,0 +1,94 @@ +# +# Check for: +# +# No PlacementAPI CR 
+# No Deployment for PlacementAPI CR +# No Pods in placement Deployment +# No Placement Services +# +apiVersion: placement.openstack.org/v1beta1 +kind: PlacementAPI +metadata: + finalizers: + - openstack.org/placementapi + name: placement +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: placement +--- +# the openshift annotations can't be checked through the deployment above +apiVersion: v1 +kind: Pod +metadata: + annotations: + openshift.io/scc: anyuid + labels: + service: placement +--- +apiVersion: v1 +kind: Service +metadata: + labels: + admin: "true" + service: placement + name: placement-admin +spec: + ports: + - name: placement-admin + selector: + service: placement + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + endpoint: internal + service: placement + name: placement-internal +spec: + ports: + - name: placement-internal + selector: + service: placement + type: ClusterIP +--- +apiVersion: v1 +kind: Service +metadata: + labels: + endpoint: public + service: placement + name: placement-public +spec: + ports: + - name: placement-public + selector: + service: placement + type: ClusterIP +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + placement.openstack.org/name: placement + name: placement-scripts + ownerReferences: + - blockOwnerDeletion: true + controller: true + kind: PlacementAPI + name: placement +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + placement.openstack.org/name: placement + name: placement-config-data + ownerReferences: + - blockOwnerDeletion: true + controller: true + kind: PlacementAPI + name: placement diff --git a/test/kuttl/placement/common/patch_placement_deploy.yaml b/test/kuttl/placement/common/patch_placement_deploy.yaml new file mode 100644 index 000000000..5647c771b --- /dev/null +++ b/test/kuttl/placement/common/patch_placement_deploy.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + oc patch placementapi -n $NAMESPACE 
placement --type='json' -p='[{"op": "replace", "path": "/spec/secret", "value":"osp-secret"}]' diff --git a/test/kuttl/placement/common/placementapi_deploy.yaml b/test/kuttl/placement/common/placementapi_deploy.yaml new file mode 120000 index 000000000..1302e1646 --- /dev/null +++ b/test/kuttl/placement/common/placementapi_deploy.yaml @@ -0,0 +1 @@ +../../../config/samples/placement_v1beta1_placementapi.yaml \ No newline at end of file diff --git a/test/kuttl/placement/common/tls_certificates.yaml b/test/kuttl/placement/common/tls_certificates.yaml new file mode 100644 index 000000000..76e826947 --- /dev/null +++ b/test/kuttl/placement/common/tls_certificates.yaml @@ -0,0 +1,31 @@ +# Hardcoded certs secret, so kuttl doesn't require cert-manager at test runtime +apiVersion: v1 +kind: Secret +metadata: + name: combined-ca-bundle + labels: + service: placement +data: + tls-ca-bundle.pem: IyByb290Y2EtaW50ZXJuYWwKLS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJmekNDQVNhZ0F3SUJBZ0lRUWxlcTNZcDBtU2kwVDNiTm03Q29UVEFLQmdncWhrak9QUVFEQWpBZ01SNHcKSEFZRFZRUURFeFZ5YjI5MFkyRXRhM1YwZEd3dGFXNTBaWEp1WVd3d0hoY05NalF3TVRFMU1URTBOelUwV2hjTgpNelF3TVRFeU1URTBOelUwV2pBZ01SNHdIQVlEVlFRREV4VnliMjkwWTJFdGEzVjBkR3d0YVc1MFpYSnVZV3d3CldUQVRCZ2NxaGtqT1BRSUJCZ2dxaGtqT1BRTUJCd05DQUFTRk9rNHJPUldVUGhoTjUrK09EN1I2MW5Gb1lBY0QKenpvUS91SW93NktjeGhwRWNQTDFxb3ZZUGxUYUJabEh3c2FpNE50VHA4aDA1RHVRSGZKOE9JNXFvMEl3UURBTwpCZ05WSFE4QkFmOEVCQU1DQXFRd0R3WURWUjBUQVFIL0JBVXdBd0VCL3pBZEJnTlZIUTRFRmdRVXE3TGtFSk1TCm1MOVpKWjBSOUluKzZkclhycEl3Q2dZSUtvWkl6ajBFQXdJRFJ3QXdSQUlnVlN1K00ydnZ3QlF3eTJHMVlhdkkKQld2RGtSNlRla0I5U0VqdzJIblRSMWtDSUZSNFNkWGFPQkFGWjVHa2RLWCtSY2IzaDFIZm52eFJEVW96bTl2agphenp3Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0KIyByb290Y2EtcHVibGljCi0tLS0tQkVHSU4gQ0VSVElGSUNBVEUtLS0tLQpNSUlCZXpDQ0FTS2dBd0lCQWdJUU5IREdZc0JzNzk4aWJERDdxL28ybGpBS0JnZ3Foa2pPUFFRREFqQWVNUnd3CkdnWURWUVFERXhOeWIyOTBZMkV0YTNWMGRHd3RjSFZpYkdsak1CNFhEVEkwTURFeE5URXdNVFV6TmxvWERUTTAKTURFeE1qRXdNVFV6Tmxvd0hqRWNNQm9HQTFVRUF4TVRjbTl2ZEdOaExXdDFkSFJzTFhCMVlteH
BZekJaTUJNRwpCeXFHU000OUFnRUdDQ3FHU000OUF3RUhBMElBQkQ3OGF2WHFocmhDNXc4czlXa2Q0SXBiZUV1MDNDUitYWFVkCmtEek9SeXhhOXdjY0lkRGl2YkdKakpGWlRUY1ZtYmpxMUJNWXNqcjEyVUlFNUVUM1ZscWpRakJBTUE0R0ExVWQKRHdFQi93UUVBd0lDcERBUEJnTlZIUk1CQWY4RUJUQURBUUgvTUIwR0ExVWREZ1FXQkJUS0ppeldVSjllVUtpMQpkczBscjZjNnNEN0VCREFLQmdncWhrak9QUVFEQWdOSEFEQkVBaUJJWndZcTYxQnFNSmFCNlVjRm9Sc3hlY3dICjV6L3pNT2RyT3llMG1OaThKZ0lnUUxCNHdES3JwZjl0WDJsb00rMHVUb3BBRFNZSW5yY2ZWdTRGQnVZVTNJZz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= +--- +apiVersion: v1 +kind: Secret +metadata: + name: cert-internal-svc + labels: + service: placement +data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJkVENDQVJxZ0F3SUJBZ0lRTkZUVDE2eTc0RGJaOGJTL25ESDBkakFLQmdncWhrak9QUVFEQWpBYU1SZ3cKRmdZRFZRUURFdzl5YjI5MFkyRXRhVzUwWlhKdVlXd3dIaGNOTWpRd01URXdNVFV5T0RBMFdoY05NalF3TkRBNQpNVFV5T0RBMFdqQWFNUmd3RmdZRFZRUURFdzl5YjI5MFkyRXRhVzUwWlhKdVlXd3dXVEFUQmdjcWhrak9QUUlCCkJnZ3Foa2pPUFFNQkJ3TkNBQVFjK2d5OVFCNmw1NFNBQlkxUTJKZWx5MEhSTGEvMzlkRUxzU2RhNnJDRENKQWwKWjJ2bGlGbUo5WVlJNCtSbGRIejJWNXYvYjBpK2x0RjcxMGZ1OHJTbW8wSXdRREFPQmdOVkhROEJBZjhFQkFNQwpBcVF3RHdZRFZSMFRBUUgvQkFVd0F3RUIvekFkQmdOVkhRNEVGZ1FVeUsyc0hXaUxHNnR6bWlVbENkUmhsRTJLCnNHSXdDZ1lJS29aSXpqMEVBd0lEU1FBd1JnSWhBSzVtTi9zQlBVcXAwckd1QjhnMVRxY21KR3ZMVUpyNjlnaEEKaEozMldCT1BBaUVBbEtwU0dVTzhac25UcVQrQ1hWbXNuWkxBcVJMV1NhbUI5U2NyczNDZ05zWT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNhekNDQWhHZ0F3SUJBZ0lSQU1GRmpzWkpHY3BuaVBFNXNmQytrOEV3Q2dZSUtvWkl6ajBFQXdJd0dqRVkKTUJZR0ExVUVBeE1QY205dmRHTmhMV2x1ZEdWeWJtRnNNQjRYRFRJME1ERXhOVEV4TkRnMU1sb1hEVE0wTURFeApNakV4TkRnMU1sb3dBRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQkFNRzhQSWwzCnc4RXdXMHdUUG5qRURpU2dTdVI4WHJaajcrSjYyUkJMTHJ3ZUxKdWd1Wm1MaUh3M09uSldWa0hEOVpaZzlYSGUKbGZ6UDY3Wi8rYXBNMzJ5VWJTVUcrRjlBdXlGMHRTK2lPODFkUFRSY1luNzVBK0xWdnk1UkVpOGIvTFkzNTNPbgpxUEhuK2kyeTNLUC9HZkhjSi9lVlVXNFJkV2wyTHEyejRtRDRUK2twS0VwSnRGSTJQa2lrSVNOV2RRdmtEeW1WClF3a1B3U01FVy9yaEdGL2s3b0gvVWtwdy9wU1N1R0M2a1lpSnlwOTFHT0xCMlVoc254Z3dLelh5VS9MdGFrZXoKS2RHSFUvNUNLTTRKczg0ZnlNTDBBNXMxalpZQXZEWkVLNEgvYVpCb3EzV0NoQ1R4WWhIOVVuczhIQy9KbHJCMApHaitwVHNuaEc2cUlFQ2tDQXdFQUFhT0JoakNCZ3pBT0JnTlZIUThCQWY4RUJBTUNCYUF3RXdZRFZSMGxCQXd3CkNnWUlLd1lCQlFVSEF3RXdEQVlEVlIwVEFRSC9CQUl3QURBZkJnTlZIU01FR0RBV2dCVElyYXdkYUlzYnEzT2EKSlNVSjFHR1VUWXF3WWpBdEJnTlZIUkVCQWY4RUl6QWhnaDlyWlhsemRHOXVaUzFwYm5SbGNtNWhiQzV2Y0dWdQpjM1JoWTJzdWMzWmpNQW9HQ0NxR1NNNDlCQU1DQTBnQU1FVUNJRTFJYXcxcnRnU0ROZmxBSjJRek9VQjJxU1llCk03ZWdsaXZLVW01cmVOZThBaUVBMU93SGcwQ1YxOUNhYUpSSi9SS25UcXNJTGhNdjBEUVNPdnFwbWc0MWZDTT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo= + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBd2J3OGlYZkR3VEJiVEJNK2VNUU9KS0JLNUh4ZXRtUHY0bnJaRUVzdXZCNHNtNkM1Cm1ZdUlmRGM2Y2xaV1FjUDFsbUQxY2Q2Vi9NL3J0bi81cWt6ZmJKUnRKUWI0WDBDN0lYUzFMNkk3elYwOU5GeGkKZnZrRDR0Vy9MbEVTTHh2OHRqZm5jNmVvOGVmNkxiTGNvLzhaOGR3bjk1VlJiaEYxYVhZdXJiUGlZUGhQNlNrbwpTa20wVWpZK1NLUWhJMVoxQytRUEtaVkRDUS9CSXdSYit1RVlYK1R1Z2Y5U1NuRCtsSks0WUxxUmlJbktuM1VZCjRzSFpTR3lmR0RBck5mSlQ4dTFxUjdNcDBZZFQva0lvemdtenpoL0l3dlFEbXpXTmxnQzhOa1FyZ2Y5cGtHaXIKZFlLRUpQRmlFZjFTZXp3Y0w4bVdzSFFhUDZsT3llRWJxb2dRS1FJREFRQUJBb0lCQVFDQ0hweUdNK05OY040UQo1V2Z6RXJMeEZKdlloRlBVcXFDbWU1NG9uR1ppUU4zekZPc3pYbzBuNkt3ZnVTOHI4cUtUQXNJM1hhbGRhSVRIClNZTDFSN1pVSmdoOGN3Y0VhdVNFbnU5R2MrODRpbVFlTStLUHAwNWQzdlFOOXJPQTRvcEVGSjRtaHJnbzZZYVYKaE9rK1dJc2piNXVFWlV5UTRiYjdRejRzdW9IVVlDYXFkVGlqU1lYQzNOd092YUlwa3pTNEo0cU5CUlhyYnNWSwowaGt4ZFNIY1hKNEREN0hybktpcEsxT2xUbUVObVZYbmlaNnRPcWc2eUNFeXFteWN0UnlUVTZRRzVPbVM2clJVCm82Z25EclA1TlgwRUhuakY3b1lka0JVbGJxWk96UHVGbG5CdUVKOFpTUEtOZHA5ejhuS3lqbGJiL0YxWGRDdEkKZERhVUhmREZBb0dCQVBFZkZZbDhPb2VhU21oSElKcGpxd3RCanYrRjFuOXNJbHNuZWNyQ1JmN243RW53d1hXaQpReStXQ3l6aDJGRVVad1dod2RQeXFJT3NVaG1vaXBIQmN3NlVUaW9xalM4SlpvSDlURFBQUEd5OXIwMHZwRkNuCnFkdjNXMkhWVytRckMrWk1nc2ZKdUlTTnFtbFdFeHpCNFBJQWRHQTdKVzFMY0ZCcG1Zd25DdXZyQW9HQkFNMncKbS94cVRhMmgySjFnNUI0elE5UnBhM280SEoyL2pTaHEzNW9heVNGNWJDYWtnWGRxek0yU0FwQ0x4dzlvY3doRAp3WWRaMWliaHl6b1dDQVZZZ0RlaXViUi96ZTN3Nzk4NktScUNmNnptMk5HOEoxODVDZDdKSjBiaTZBTTgvalpTCnFqWkJIK0FqanF2aFFJM0FMMEdzNlFvc2Z3L3hOL2k1cG00UWM5TTdBb0dBZjZCbFpQVmxnWnN3WVV1c3ZTdWUKUUlIOTc5Qm12ZUY5dWVRR09rVmtpVTAzSzlnTWZuaFp1WmxnNXV2UDlQS29xVGw2Zi9aRUxoWUxDdHZFSk94UgpPMWxTbWswVmw5MFE3aU1scjVLMHVCWWE4TzhUdVVGVnprRjZsQ2s3ejJUZGtwUFM4VzhiaE1YN2VtLytBODIzCmhFQ3JXTGhWMGlrSkZQY2dPQ2YrUnVzQ2dZQTYvcld1cnhxNmUxb3l3WENNVE8zZWhhSUMrd2NTSTdlcjZRTmIKSXVXZlNVRkEwQndtRVNiT3ExczY5Q3hTK2dWTVVJcTRkSWJjdmhSWkE2cW5SZHY0bVI2a2E2ZTM0RXdjZllUKwppb0Z1S1FQMUcvODY2NVF1SndteDVqRGZoT1h3MU1MbkxzU2l0L0FhMGs5K21LbTFMNC9qa0NHZGcvVW16TEMwCmp0bDVzd0tCZ0VPTVI3ODVLT2hyNXFoWmE2b0MvU25JeEptS1Fx
TWdXU0NGV1pGMDZrVlRnSmthb1hwUEl0bUIKOUZGbE1nTTJSeC91S2V3YTNDSTdQK240ek1uYSswTmhDL0RwNkMxVFVsVWlrcnJYQ3I5a1NPR2dXaEFISDljTwozRENvdkhOcE1PaG51dnhoMlpDeTdYbjFJeGgxWXdlYnVobFZzeTFvR0tDQ0lJb00rOVg1Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg== +--- +apiVersion: v1 +kind: Secret +metadata: + name: cert-public-svc + labels: + service: placement +data: + ca.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUJlekNDQVNLZ0F3SUJBZ0lRTkhER1lzQnM3OThpYkREN3EvbzJsakFLQmdncWhrak9QUVFEQWpBZU1Sd3cKR2dZRFZRUURFeE55YjI5MFkyRXRhM1YwZEd3dGNIVmliR2xqTUI0WERUSTBNREV4TlRFd01UVXpObG9YRFRNMApNREV4TWpFd01UVXpObG93SGpFY01Cb0dBMVVFQXhNVGNtOXZkR05oTFd0MWRIUnNMWEIxWW14cFl6QlpNQk1HCkJ5cUdTTTQ5QWdFR0NDcUdTTTQ5QXdFSEEwSUFCRDc4YXZYcWhyaEM1dzhzOVdrZDRJcGJlRXUwM0NSK1hYVWQKa0R6T1J5eGE5d2NjSWREaXZiR0pqSkZaVFRjVm1ianExQk1Zc2pyMTJVSUU1RVQzVmxxalFqQkFNQTRHQTFVZApEd0VCL3dRRUF3SUNwREFQQmdOVkhSTUJBZjhFQlRBREFRSC9NQjBHQTFVZERnUVdCQlRLSml6V1VKOWVVS2kxCmRzMGxyNmM2c0Q3RUJEQUtCZ2dxaGtqT1BRUURBZ05IQURCRUFpQklad1lxNjFCcU1KYUI2VWNGb1JzeGVjd0gKNXovek1PZHJPeWUwbU5pOEpnSWdRTEI0d0RLcnBmOXRYMmxvTSswdVRvcEFEU1lJbnJjZlZ1NEZCdVlVM0lnPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.crt: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUNiVENDQWhPZ0F3SUJBZ0lSQUtacXlMbUhLNC9VRTZmMi9LNWxiQnN3Q2dZSUtvWkl6ajBFQXdJd0hqRWMKTUJvR0ExVUVBeE1UY205dmRHTmhMV3QxZEhSc0xYQjFZbXhwWXpBZUZ3MHlOREF4TVRVeE1ESXdOVFJhRncwegpOREF4TVRJeE1ESXdOVFJhTUFBd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3Z2dFS0FvSUJBUUMxCjhDcFJRVG1abHNzSUlmZ2hIK2ltUUtFMFdZVlJOeS8vMVM0aDVtV2tBcUZiVkhoUmptbFJ2cCtQUWpKOU16TDUKMXpXdmYxandEQ2pzYUxvL2FwSW9OSXJIcjN4TTRoYWl0emU0RjFwZzNoL3MvblExNWN5Q2U5dHdHR0RuWEllMwo2djBuNE9LNnAwSWJjcVk2Q1RBMTBwcGJZa3V6bzdVRkx6ZWxsc1ZhRlhzZ21JWDg4bTRXNmNBTi84cjJPWUI3Ck9HM0ZNOXAxSUFxT0hyT21EelFlTldqOUVjQy9TSCs5MGg4c1FyY1pvMWtWa1g1b2tpSUhDZjRlc2o3Q08rTGgKR3lsTmZyRzl6QTlPM0c3QVNDWVdPVWwyZTBhNHhZbE9QMmI4ejFEV3NIMTBVYXVsZHlRQXNtbkhtaW1VNzBmKwpEazZkQ1hXVHN4cGZ2cXphOVR4YkFnTUJBQUdqZ1lRd2dZRXdEZ1lEVlIwUEFRSC9CQVFEQWdXZ01CTUdBMVVkCkpRUU1NQW9HQ0NzR0FRVUZCd01CTUF3R0ExVWRFd0VCL3dRQ01BQXdId1lEVlIwakJCZ3dGb0FVeWlZczFsQ2YKWGxDb3RYYk5KYStuT3JBK3hBUXdLd1lEVlIwUkFRSC9CQ0V3SDRJZGEyVjVjM1J2Ym1VdGNIVmliR2xqTG05dwpaVzV6ZEdGamF5NXpkbU13Q2dZSUtvWkl6ajBFQXdJRFNBQXdSUUlnTzAzT2JmNm9uV2RiZG4xa282OVpuTFhMCmtQSHFYU3VRNlcxTDFvY3NDR3NDSVFEakEyVm9pWVdYN0hzSjVGNkZYV3FsZnl0RmduVVgvTmhvT1lIVnB2TWQKSGc9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg== + tls.key: 
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFcEFJQkFBS0NBUUVBdGZBcVVVRTVtWmJMQ0NINElSL29wa0NoTkZtRlVUY3YvOVV1SWVabHBBS2hXMVI0ClVZNXBVYjZmajBJeWZUTXkrZGMxcjM5WThBd283R2k2UDJxU0tEU0t4Njk4VE9JV29yYzN1QmRhWU40ZjdQNTAKTmVYTWdudmJjQmhnNTF5SHQrcjlKK0RpdXFkQ0czS21PZ2t3TmRLYVcySkxzNk8xQlM4M3BaYkZXaFY3SUppRgovUEp1RnVuQURmL0s5am1BZXpodHhUUGFkU0FLamg2enBnODBIalZvL1JIQXYwaC92ZElmTEVLM0dhTlpGWkYrCmFKSWlCd24rSHJJK3dqdmk0UnNwVFg2eHZjd1BUdHh1d0VnbUZqbEpkbnRHdU1XSlRqOW0vTTlRMXJCOWRGR3IKcFhja0FMSnB4NW9wbE85SC9nNU9uUWwxazdNYVg3NnMydlU4V3dJREFRQUJBb0lCQUd0eVdvdUNLYkk3Qzh6Ugp3dWhOSCtpUFlxUzMrYlB0RTd2UytsdXE1WHZtMGNST0xvQjd5bGNzYks3K09UTVhlWk56TlpGZmMvYlFONXJtCmZwZlZLRnYySzcraU01WjBMMG9KU2k2K0cvSDVQSUdLQkxlUDd5ZGdYa2ZsSGRXRkgrSE9OWlBIakI4UGlFc04KZW4zcnp6ejZFNDdFamxDWTdkOFI4NXNuWDRYREN2bG1CQnhvcnpqVERuK1dTWWpKS09SSk5zY3oxQXFYR1VjVwpQaHRNYkwybC8zN2hPbTA4SjRRWXowTWduOWE5VUFXLzFNS2lXbHVpc1NHNG9YaFNPS1hkdk1IS3VxS09sUDJzCk9xWjBlR3JBNmpKdWlmZVY2Q2NIU2p3VUgwdHpiMmdZQVM2cm5RQlREbFkxR1I4Skx0YWhWREtqdUwyV0hjclkKbHhCOGZBRUNnWUVBMWpxc01weFo5cG9LNkpmRDZzVTU0ZUU1UWlNYlB0MERRZjlYU1J6NW5zQXlraHdKWEZDVwpKWTNiU3BhcGREeEgzakpDQ0VzN3NSWUhUeDRzNVJQSldtWC9oZTFwVEs4TDVlaFV2TmVudG5nVTB3aE4rZVEzCjl4Sk1VbHVYdGkvU0FpNi9jQk5HY1ZjQjFGRStmVzY0VDhqYVVQakRrL0Z2dFRXOXkzZnNVZnNDZ1lFQTJXbXMKYStuZ2RaS24rVTlCMlFCTXB0K0RLL0txNVF6RW1qZDVMSjJMb3FLUjhGbjlpVVZoUVljUEpobDJVV3VjTTl0RQp0QUlYdEY0anVUejlqUUNMMGQ5a09DeCswdTBKUFZJejdlVmFFVGs1enF0azRsTnhVUkJhQ3pCUEJkdjZJd3BDCkR4UXJWRXBXYlMra1JvYTNKSElOdHdjUWt2Nk50N1JIajd1WEVTRUNnWUJsREozbTdZc2Q0QUZmUHg4Qm9YQXgKRkt5T2ZzSytQei9uSkl0R2lHMVNMWFJ0S041ZGRnR3N5eUh5SitqY1ZBYk9UMFNJWnZ4TUJva0NEOGk3Y1Q3Ygo3aHErVUlNSDBkVzU1NEg0NVh4TmZJek9FaSs5dktHTllFc3gyZFJROG5PTDVnTVUyWEt6eVllcVgzd3JiRXR5CkR0cXpzUE9IMkMySiswU0FNaHY5ZXdLQmdRQ2szeWs5TUwvaUNWUk9rTmNybTdtRk5xeS9rQ2dleU43eTRDeUoKTS9RbllrZHYwSjZmRWJrZU96QzJ3TXBrRmtuL1hVR3RqSVN6YUV5STlnS0ZnaXVGL1hWL3orWmhTQllncFl6eAoxR0xIK3ZDbWxIMU4wTjkzRFFKcng3ZTFoc3NhOVhXQS85ZVg5VU96UzFTMWt3V2hvc2haeXdhN29rU1FVaXVPCmlVQ1hZUUtCZ1FEUVZUVHc3WUY3QzNTVmg5OWRObUdTaHV2LzZ2
aTJmNDlOMklGMURNQ1haaEpoOUVZck9TV2kKY05oakxGRFhmdzVlZlFURWU3Ykx5bTJGVDd0YnZFSm5USHFyakVuUDRUWExqZnczL3RiQ3RxWVNZRlRqdThFUApadHVwd21ZWjhFVU1pSnVHS2l2SExmSjk2dy8xR21BOHVCZUVtV05YRW9FUU1ySmxuM3g5d3c9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo= diff --git a/test/kuttl/placement/go_templates/apiEndpoints.gotemplate b/test/kuttl/placement/go_templates/apiEndpoints.gotemplate new file mode 100644 index 000000000..dcddb1070 --- /dev/null +++ b/test/kuttl/placement/go_templates/apiEndpoints.gotemplate @@ -0,0 +1 @@ +{{.spec.endpoints.internal}}{{":"}}{{.spec.endpoints.public}}{{"\n"}} diff --git a/test/kuttl/placement/lib/helper_functions.sh b/test/kuttl/placement/lib/helper_functions.sh new file mode 100755 index 000000000..535b9e07a --- /dev/null +++ b/test/kuttl/placement/lib/helper_functions.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +assert_regex() { + + if [[ $1 =~ $2 ]]; then + exit 0 + else + printf '%s\n' "Regex check '$2' failed against oc output: $1"; + exit 1 + fi +} + +"$@" diff --git a/test/kuttl/placement/tests/placement_deploy_tls/00-cleanup-placement.yaml b/test/kuttl/placement/tests/placement_deploy_tls/00-cleanup-placement.yaml new file mode 120000 index 000000000..e067bd151 --- /dev/null +++ b/test/kuttl/placement/tests/placement_deploy_tls/00-cleanup-placement.yaml @@ -0,0 +1 @@ +../../common/cleanup-placement.yaml \ No newline at end of file diff --git a/test/kuttl/placement/tests/placement_deploy_tls/01-assert.yaml b/test/kuttl/placement/tests/placement_deploy_tls/01-assert.yaml new file mode 100644 index 000000000..119adc661 --- /dev/null +++ b/test/kuttl/placement/tests/placement_deploy_tls/01-assert.yaml @@ -0,0 +1,14 @@ +apiVersion: v1 +kind: Secret +metadata: + name: cert-internal-svc +--- +apiVersion: v1 +kind: Secret +metadata: + name: cert-public-svc +--- +apiVersion: v1 +kind: Secret +metadata: + name: combined-ca-bundle diff --git a/test/kuttl/placement/tests/placement_deploy_tls/01-tls_certificates.yaml 
b/test/kuttl/placement/tests/placement_deploy_tls/01-tls_certificates.yaml new file mode 120000 index 000000000..b63d9bfb8 --- /dev/null +++ b/test/kuttl/placement/tests/placement_deploy_tls/01-tls_certificates.yaml @@ -0,0 +1 @@ +../../common/tls_certificates.yaml \ No newline at end of file diff --git a/test/kuttl/placement/tests/placement_deploy_tls/02-placementapi_deploy_tls.yaml b/test/kuttl/placement/tests/placement_deploy_tls/02-placementapi_deploy_tls.yaml new file mode 120000 index 000000000..11447fab8 --- /dev/null +++ b/test/kuttl/placement/tests/placement_deploy_tls/02-placementapi_deploy_tls.yaml @@ -0,0 +1 @@ +../../../../config/samples/placement_v1beta1_placementtls.yaml \ No newline at end of file diff --git a/test/kuttl/placement/tests/placement_deploy_tls/03-assert.yaml b/test/kuttl/placement/tests/placement_deploy_tls/03-assert.yaml new file mode 100644 index 000000000..207b4bd46 --- /dev/null +++ b/test/kuttl/placement/tests/placement_deploy_tls/03-assert.yaml @@ -0,0 +1,217 @@ +apiVersion: placement.openstack.org/v1beta1 +kind: PlacementAPI +metadata: + finalizers: + - openstack.org/placementapi + name: placement +spec: + customServiceConfig: | + [DEFAULT] + debug = true + databaseInstance: openstack + databaseAccount: placement + passwordSelectors: + service: PlacementPassword + preserveJobs: false + replicas: 1 + secret: osp-secret + serviceUser: placement + tls: + api: + internal: + secretName: cert-internal-svc + public: + secretName: cert-public-svc + caBundleSecretName: combined-ca-bundle +status: + databaseHostname: openstack.placement-kuttl-tests.svc + readyCount: 1 + conditions: + - message: Setup complete + reason: Ready + status: "True" + type: Ready + - message: Create service completed + reason: Ready + status: "True" + type: CreateServiceReady + - message: DB create completed + reason: Ready + status: "True" + type: DBReady + - message: DBsync completed + reason: Ready + status: "True" + type: DBSyncReady + - message: Deployment 
completed + reason: Ready + status: "True" + type: DeploymentReady + - message: Input data complete + reason: Ready + status: "True" + type: InputReady + - message: Setup complete + reason: Ready + status: "True" + type: KeystoneEndpointReady + - message: Setup complete + reason: Ready + status: "True" + type: KeystoneServiceReady + - message: MariaDBAccount creation complete + reason: Ready + status: "True" + type: MariaDBAccountReady + - message: NetworkAttachments completed + reason: Ready + status: "True" + type: NetworkAttachmentsReady + - message: RoleBinding created + reason: Ready + status: "True" + type: RoleBindingReady + - message: Role created + reason: Ready + status: "True" + type: RoleReady + - message: ServiceAccount created + reason: Ready + status: "True" + type: ServiceAccountReady + - message: Service config create completed + reason: Ready + status: "True" + type: ServiceConfigReady + - message: Input data complete + reason: Ready + status: "True" + type: TLSInputReady +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: placement +spec: + replicas: 1 + template: + metadata: + labels: + service: placement + spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchExpressions: + - key: service + operator: In + values: + - placement + topologyKey: kubernetes.io/hostname + weight: 100 + containers: + - args: + - --single-child + - -- + - /usr/bin/tail + - -n+1 + - -F + - /var/log/placement/placement-api.log + command: + - /usr/bin/dumb-init + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: / + port: 8778 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 30 + name: placement-log + readinessProbe: + failureThreshold: 3 + httpGet: + path: / + port: 8778 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 30 + resources: {} + 
securityContext: + runAsUser: 42482 + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /usr/local/bin/container-scripts + name: scripts + readOnly: true + - mountPath: /var/log/placement + name: logs + - mountPath: /var/lib/openstack/config + name: config-data + - mountPath: /var/lib/kolla/config_files/config.json + name: config-data + readOnly: true + subPath: placement-api-config.json + - mountPath: /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem + name: combined-ca-bundle + readOnly: true + subPath: tls-ca-bundle.pem + - mountPath: /var/lib/config-data/tls/certs/internal.crt + name: internal-tls-certs + readOnly: true + subPath: tls.crt + - mountPath: /var/lib/config-data/tls/private/internal.key + name: internal-tls-certs + readOnly: true + subPath: tls.key + - mountPath: /var/lib/config-data/tls/certs/public.crt + name: public-tls-certs + readOnly: true + subPath: tls.crt + - mountPath: /var/lib/config-data/tls/private/public.key + name: public-tls-certs + readOnly: true + subPath: tls.key + - args: + - -c + - /usr/local/bin/kolla_start + command: + - /bin/bash + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: / + port: 8778 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 30 + name: placement-api + readinessProbe: + failureThreshold: 3 + httpGet: + path: / + port: 8778 + scheme: HTTPS + initialDelaySeconds: 5 + periodSeconds: 30 + successThreshold: 1 + timeoutSeconds: 30 + resources: {} + restartPolicy: Always + securityContext: {} + serviceAccount: placement-placement + serviceAccountName: placement-placement +status: + availableReplicas: 1 + replicas: 1 diff --git a/test/kuttl/placement/tests/placement_deploy_tls/03-patch_placement_deploy.yaml b/test/kuttl/placement/tests/placement_deploy_tls/03-patch_placement_deploy.yaml new file mode 120000 index 000000000..82362fc71 --- /dev/null +++ 
b/test/kuttl/placement/tests/placement_deploy_tls/03-patch_placement_deploy.yaml @@ -0,0 +1 @@ +../../common/patch_placement_deploy.yaml \ No newline at end of file diff --git a/test/kuttl/placement/tests/placement_deploy_tls/04-cleanup-placement.yaml b/test/kuttl/placement/tests/placement_deploy_tls/04-cleanup-placement.yaml new file mode 120000 index 000000000..e067bd151 --- /dev/null +++ b/test/kuttl/placement/tests/placement_deploy_tls/04-cleanup-placement.yaml @@ -0,0 +1 @@ +../../common/cleanup-placement.yaml \ No newline at end of file diff --git a/test/kuttl/placement/tests/placement_deploy_tls/04-errors.yaml b/test/kuttl/placement/tests/placement_deploy_tls/04-errors.yaml new file mode 120000 index 000000000..b05cc60f8 --- /dev/null +++ b/test/kuttl/placement/tests/placement_deploy_tls/04-errors.yaml @@ -0,0 +1 @@ +../../common/errors_cleanup_placement.yaml \ No newline at end of file diff --git a/test/kuttl/placement/tests/placement_scale/00-cleanup-placement.yaml b/test/kuttl/placement/tests/placement_scale/00-cleanup-placement.yaml new file mode 120000 index 000000000..e067bd151 --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/00-cleanup-placement.yaml @@ -0,0 +1 @@ +../../common/cleanup-placement.yaml \ No newline at end of file diff --git a/test/kuttl/placement/tests/placement_scale/01-deploy_placement.yaml b/test/kuttl/placement/tests/placement_scale/01-deploy_placement.yaml new file mode 120000 index 000000000..748d21948 --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/01-deploy_placement.yaml @@ -0,0 +1 @@ +../../common/placementapi_deploy.yaml \ No newline at end of file diff --git a/test/kuttl/placement/tests/placement_scale/02-assert.yaml b/test/kuttl/placement/tests/placement_scale/02-assert.yaml new file mode 120000 index 000000000..461654ea9 --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/02-assert.yaml @@ -0,0 +1 @@ +../../common/assert_sample_deployment.yaml \ No newline at end of file diff --git 
a/test/kuttl/placement/tests/placement_scale/02-patch_placement_deploy.yaml b/test/kuttl/placement/tests/placement_scale/02-patch_placement_deploy.yaml new file mode 120000 index 000000000..82362fc71 --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/02-patch_placement_deploy.yaml @@ -0,0 +1 @@ +../../common/patch_placement_deploy.yaml \ No newline at end of file diff --git a/test/kuttl/placement/tests/placement_scale/03-assert.yaml b/test/kuttl/placement/tests/placement_scale/03-assert.yaml new file mode 100644 index 000000000..c3534b707 --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/03-assert.yaml @@ -0,0 +1,26 @@ +# +# Check for: +# +# - 1 placementAPI CR +# - 3 Pods for placementAPI CR +# + +apiVersion: placement.openstack.org/v1beta1 +kind: PlacementAPI +metadata: + finalizers: + - openstack.org/placementapi + name: placement +spec: + replicas: 3 +status: + readyCount: 3 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: placement +spec: + replicas: 3 +status: + availableReplicas: 3 diff --git a/test/kuttl/placement/tests/placement_scale/03-scale-placementapi.yaml b/test/kuttl/placement/tests/placement_scale/03-scale-placementapi.yaml new file mode 100644 index 000000000..69f72843c --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/03-scale-placementapi.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + oc patch placementapi -n $NAMESPACE placement --type='json' -p='[{"op": "replace", "path": "/spec/replicas", "value":3}]' diff --git a/test/kuttl/placement/tests/placement_scale/04-assert.yaml b/test/kuttl/placement/tests/placement_scale/04-assert.yaml new file mode 100644 index 000000000..cfed8052b --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/04-assert.yaml @@ -0,0 +1,26 @@ +# +# Check for: +# +# - 1 PlacementAPI CR +# - 1 Pods for PlacementAPI CR +# + +apiVersion: placement.openstack.org/v1beta1 +kind: PlacementAPI +metadata: + finalizers: + - 
openstack.org/placementapi + name: placement +spec: + replicas: 1 +status: + readyCount: 1 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: placement +spec: + replicas: 1 +status: + availableReplicas: 1 diff --git a/test/kuttl/placement/tests/placement_scale/04-scale-down-placementapi.yaml b/test/kuttl/placement/tests/placement_scale/04-scale-down-placementapi.yaml new file mode 100644 index 000000000..4ddad1ca6 --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/04-scale-down-placementapi.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + oc patch placementapi -n $NAMESPACE placement --type='json' -p='[{"op": "replace", "path": "/spec/replicas", "value":1}]' diff --git a/test/kuttl/placement/tests/placement_scale/05-assert.yaml b/test/kuttl/placement/tests/placement_scale/05-assert.yaml new file mode 100644 index 000000000..209ec4645 --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/05-assert.yaml @@ -0,0 +1,23 @@ +# +# Check for: +# +# - 1 PlacementAPI CR with 0 replicas +# - Placement Deployment with 0 Pods +# NOTE: This test is asserting for spec.replicas to be 0 +# NOT status.availableReplicas + +apiVersion: placement.openstack.org/v1beta1 +kind: PlacementAPI +metadata: + finalizers: + - openstack.org/placementapi + name: placement +spec: + replicas: 0 +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: placement +spec: + replicas: 0 diff --git a/test/kuttl/placement/tests/placement_scale/05-scale-down-zero-placementapi.yaml b/test/kuttl/placement/tests/placement_scale/05-scale-down-zero-placementapi.yaml new file mode 100644 index 000000000..34306eece --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/05-scale-down-zero-placementapi.yaml @@ -0,0 +1,5 @@ +apiVersion: kuttl.dev/v1beta1 +kind: TestStep +commands: + - script: | + oc patch placementapi -n $NAMESPACE placement --type='json' -p='[{"op": "replace", "path": "/spec/replicas", "value":0}]' diff --git 
a/test/kuttl/placement/tests/placement_scale/06-cleanup-placement.yaml b/test/kuttl/placement/tests/placement_scale/06-cleanup-placement.yaml new file mode 120000 index 000000000..e067bd151 --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/06-cleanup-placement.yaml @@ -0,0 +1 @@ +../../common/cleanup-placement.yaml \ No newline at end of file diff --git a/test/kuttl/placement/tests/placement_scale/06-errors.yaml b/test/kuttl/placement/tests/placement_scale/06-errors.yaml new file mode 120000 index 000000000..b05cc60f8 --- /dev/null +++ b/test/kuttl/placement/tests/placement_scale/06-errors.yaml @@ -0,0 +1 @@ +../../common/errors_cleanup_placement.yaml \ No newline at end of file