From 18afc2f3dd280cd7e680f10a12d19248e38bc43f Mon Sep 17 00:00:00 2001 From: Eneman Donatien Date: Fri, 14 Feb 2025 10:00:01 +0100 Subject: [PATCH] [ENH] :sparkles: add s3instance object + update deps + unitest + chart in repo + refacto --- .github/workflows/ci-chart.yaml | 60 ++ .github/workflows/{ci.yaml => ci-docker.yaml} | 47 +- .github/workflows/ci-go.yaml | 41 + .github/workflows/release.yaml | 40 + .gitignore | 4 +- Dockerfile | 9 +- LICENSE | 28 +- Makefile | 14 +- PROJECT | 20 +- README.md | 79 +- api/v1alpha1/bucket_types.go | 20 +- api/v1alpha1/path_types.go | 12 +- api/v1alpha1/policy_types.go | 12 +- api/v1alpha1/s3instance_types.go | 103 ++ api/v1alpha1/s3user_types.go | 12 +- api/v1alpha1/types.go | 17 + api/v1alpha1/zz_generated.deepcopy.go | 101 ++ main.go => cmd/main.go | 146 +-- config/crd/bases/s3.onyxia.sh_buckets.yaml | 32 +- config/crd/bases/s3.onyxia.sh_paths.yaml | 31 +- config/crd/bases/s3.onyxia.sh_policies.yaml | 31 +- .../crd/bases/s3.onyxia.sh_s3instances.yaml | 165 ++++ config/crd/bases/s3.onyxia.sh_s3users.yaml | 31 +- config/rbac/role.yaml | 73 +- .../samples/s3.onyxia.sh_v1alpha1_bucket.yaml | 6 - .../samples/s3.onyxia.sh_v1alpha1_path.yaml | 6 - .../samples/s3.onyxia.sh_v1alpha1_policy.yaml | 6 - .../s3.onyxia.sh_v1alpha1_s3instance.yaml | 31 + .../samples/s3.onyxia.sh_v1alpha1_s3user.yaml | 6 - controllers/bucket_controller.go | 267 ------ controllers/path_controller.go | 233 ----- controllers/policy_controller.go | 241 ----- controllers/user_controller.go | 559 ----------- controllers/utils/utils.go | 29 - deploy/charts/s3-operator/.helmignore | 23 + deploy/charts/s3-operator/Chart.yaml | 21 + deploy/charts/s3-operator/README.md | 32 + .../charts/s3-operator/templates/_helpers.tpl | 62 ++ .../s3-operator/templates/crds/buckets.yaml | 157 ++++ .../s3-operator/templates/crds/paths.yaml | 141 +++ .../s3-operator/templates/crds/policies.yaml | 140 +++ .../templates/crds/s3instances.yaml | 180 ++++ .../s3-operator/templates/crds/s3users.yaml | 144 +++ .../templates/default-s3instance.yaml | 69 ++ .../s3-operator/templates/deployment.yaml | 73 ++ .../templates/leader-election-rbac.yaml | 59 ++ .../s3-operator/templates/manager-rbac.yaml | 181 ++++ .../s3-operator/templates/serviceaccount.yaml | 9 + deploy/charts/s3-operator/values.yaml | 51 + docs/.gitkeep | 0 go.mod | 143 +-- go.sum | 366 +++----- internal/controller/bucket/constants.go | 19 + internal/controller/bucket/controller.go | 65 ++ internal/controller/bucket/finalizer.go | 112 +++ internal/controller/bucket/finalizer_test.go | 90 ++ internal/controller/bucket/reconcile.go | 488 ++++++++++ internal/controller/bucket/reconcile_test.go | 140 +++ internal/controller/bucket/status.go | 47 + internal/controller/path/constants.go | 19 + internal/controller/path/controller.go | 65 ++ internal/controller/path/finalizer.go | 142 +++ internal/controller/path/finalizer_test.go | 90 ++ internal/controller/path/reconcile.go | 292 ++++++ internal/controller/path/reconcile_test.go | 156 ++++ internal/controller/path/status.go | 48 + internal/controller/policy/constants.go | 19 + internal/controller/policy/controller.go | 65 ++ internal/controller/policy/finalizer.go | 131 +++ internal/controller/policy/finalizer_test.go | 90 ++ internal/controller/policy/reconcile.go | 389 ++++++++ internal/controller/policy/reconcile_test.go | 169 ++++ internal/controller/policy/status.go | 47 + internal/controller/s3instance/constants.go | 21 + internal/controller/s3instance/controller.go | 66 ++ 
internal/controller/s3instance/finalizer.go | 138 +++ .../controller/s3instance/finalizer_test.go | 335 +++++++ internal/controller/s3instance/reconcile.go | 201 ++++ .../controller/s3instance/reconcile_test.go | 156 ++++ internal/controller/s3instance/status.go | 47 + .../controller}/suite_test.go | 16 +- internal/controller/user/constants.go | 21 + internal/controller/user/controller.go | 93 ++ internal/controller/user/finalizer.go | 151 +++ internal/controller/user/finalizer_test.go | 133 +++ internal/controller/user/reconcile.go | 882 ++++++++++++++++++ internal/controller/user/reconcile_test.go | 302 ++++++ internal/controller/user/status.go | 47 + internal/controller/user/utils.go | 186 ++++ internal/helpers/S3instance_test.go | 228 +++++ internal/helpers/controller.go | 90 ++ internal/helpers/controller_test.go | 119 +++ .../helpers}/password_generator.go | 36 +- internal/helpers/password_generator_test.go | 32 + internal/helpers/s3instance.go | 251 +++++ .../s3/client/impl}/minioS3Client.go | 342 +++++-- .../s3/client/impl}/mockedS3Client.go | 56 +- .../s3/client/s3client.go | 61 +- internal/s3/factory/impl/s3factoryImpl.go | 41 + internal/s3/factory/s3factory.go | 25 + test/mocks/S3FactoryMock.go | 37 + test/mocks/mockedS3Client.go | 202 ++++ test/utils/testUtils.go | 188 ++++ 103 files changed, 9516 insertions(+), 2033 deletions(-) create mode 100644 .github/workflows/ci-chart.yaml rename .github/workflows/{ci.yaml => ci-docker.yaml} (61%) create mode 100644 .github/workflows/ci-go.yaml create mode 100644 .github/workflows/release.yaml create mode 100644 api/v1alpha1/s3instance_types.go create mode 100644 api/v1alpha1/types.go rename main.go => cmd/main.go (54%) create mode 100644 config/crd/bases/s3.onyxia.sh_s3instances.yaml create mode 100644 config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml delete mode 100644 controllers/bucket_controller.go delete mode 100644 controllers/path_controller.go delete mode 100644 controllers/policy_controller.go delete mode 100644 controllers/user_controller.go delete mode 100644 controllers/utils/utils.go create mode 100644 deploy/charts/s3-operator/.helmignore create mode 100644 deploy/charts/s3-operator/Chart.yaml create mode 100644 deploy/charts/s3-operator/README.md create mode 100644 deploy/charts/s3-operator/templates/_helpers.tpl create mode 100644 deploy/charts/s3-operator/templates/crds/buckets.yaml create mode 100644 deploy/charts/s3-operator/templates/crds/paths.yaml create mode 100644 deploy/charts/s3-operator/templates/crds/policies.yaml create mode 100644 deploy/charts/s3-operator/templates/crds/s3instances.yaml create mode 100644 deploy/charts/s3-operator/templates/crds/s3users.yaml create mode 100644 deploy/charts/s3-operator/templates/default-s3instance.yaml create mode 100644 deploy/charts/s3-operator/templates/deployment.yaml create mode 100644 deploy/charts/s3-operator/templates/leader-election-rbac.yaml create mode 100644 deploy/charts/s3-operator/templates/manager-rbac.yaml create mode 100644 deploy/charts/s3-operator/templates/serviceaccount.yaml create mode 100644 deploy/charts/s3-operator/values.yaml create mode 100644 docs/.gitkeep create mode 100644 internal/controller/bucket/constants.go create mode 100644 internal/controller/bucket/controller.go create mode 100644 internal/controller/bucket/finalizer.go create mode 100644 internal/controller/bucket/finalizer_test.go create mode 100644 internal/controller/bucket/reconcile.go create mode 100644 internal/controller/bucket/reconcile_test.go create mode 100644 
internal/controller/bucket/status.go create mode 100644 internal/controller/path/constants.go create mode 100644 internal/controller/path/controller.go create mode 100644 internal/controller/path/finalizer.go create mode 100644 internal/controller/path/finalizer_test.go create mode 100644 internal/controller/path/reconcile.go create mode 100644 internal/controller/path/reconcile_test.go create mode 100644 internal/controller/path/status.go create mode 100644 internal/controller/policy/constants.go create mode 100644 internal/controller/policy/controller.go create mode 100644 internal/controller/policy/finalizer.go create mode 100644 internal/controller/policy/finalizer_test.go create mode 100644 internal/controller/policy/reconcile.go create mode 100644 internal/controller/policy/reconcile_test.go create mode 100644 internal/controller/policy/status.go create mode 100644 internal/controller/s3instance/constants.go create mode 100644 internal/controller/s3instance/controller.go create mode 100644 internal/controller/s3instance/finalizer.go create mode 100644 internal/controller/s3instance/finalizer_test.go create mode 100644 internal/controller/s3instance/reconcile.go create mode 100644 internal/controller/s3instance/reconcile_test.go create mode 100644 internal/controller/s3instance/status.go rename {controllers => internal/controller}/suite_test.go (73%) create mode 100644 internal/controller/user/constants.go create mode 100644 internal/controller/user/controller.go create mode 100644 internal/controller/user/finalizer.go create mode 100644 internal/controller/user/finalizer_test.go create mode 100644 internal/controller/user/reconcile.go create mode 100644 internal/controller/user/reconcile_test.go create mode 100644 internal/controller/user/status.go create mode 100644 internal/controller/user/utils.go create mode 100644 internal/helpers/S3instance_test.go create mode 100644 internal/helpers/controller.go create mode 100644 internal/helpers/controller_test.go rename {controllers/utils/password => internal/helpers}/password_generator.go (78%) create mode 100644 internal/helpers/password_generator_test.go create mode 100644 internal/helpers/s3instance.go rename {controllers/s3/factory => internal/s3/client/impl}/minioS3Client.go (56%) rename {controllers/s3/factory => internal/s3/client/impl}/mockedS3Client.go (59%) rename controllers/s3/factory/interface.go => internal/s3/client/s3client.go (56%) create mode 100644 internal/s3/factory/impl/s3factoryImpl.go create mode 100644 internal/s3/factory/s3factory.go create mode 100644 test/mocks/S3FactoryMock.go create mode 100644 test/mocks/mockedS3Client.go create mode 100644 test/utils/testUtils.go diff --git a/.github/workflows/ci-chart.yaml b/.github/workflows/ci-chart.yaml new file mode 100644 index 0000000..880baca --- /dev/null +++ b/.github/workflows/ci-chart.yaml @@ -0,0 +1,60 @@ +name: Helm CI + +on: + push: + branches: + - "**" + paths-ignore: + - 'docs/**' + tags: + - "*" + pull_request: + paths-ignore: + - 'docs/**' + +jobs: + + helm-lint: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Helm on Runner + uses: azure/setup-helm@v4.2.0 + + - name: Lint Chart + run: helm lint . 
+ + helm-unitest: + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Helm on Runner + uses: azure/setup-helm@v4.2.0 + + - name: Lint Chart + run: echo "TODO" + + release_helm: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Helm on Runner + uses: azure/setup-helm@v4.2.0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.6.0 + with: + charts_dir: deploy/charts + skip_existing: true + env: + CR_TOKEN: "${{ secrets.CR_TOKEN }}" diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci-docker.yaml similarity index 61% rename from .github/workflows/ci.yaml rename to .github/workflows/ci-docker.yaml index fc9cb7e..c548e77 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci-docker.yaml @@ -1,38 +1,69 @@ -on: [push] -name: build -jobs: - build: +name: Docker CI + +on: + push: + branches: + - "**" + paths-ignore: + - 'docs/**' + tags: + - "*" + pull_request: + paths-ignore: + - 'docs/**' + +jobs: + + lint-docker: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - uses: hadolint/hadolint-action@v3.1.0 + with: + dockerfile: Dockerfile + + build-docker: outputs: version: ${{ steps.get_version.outputs.VERSION }} runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 + - name: Docker meta id: docker_meta - uses: crazy-max/ghaction-docker-meta@v5.1.0 + uses: docker/metadata-action@v5 with: images: inseefrlab/s3-operator # list of Docker images to use as base name for tags + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + - name: Set up QEMU uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 + - name: Login to DockerHub if: github.event_name != 'pull_request' uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . file: ./Dockerfile push: ${{ github.event_name != 'pull_request' }} - # Use tags computed before and also latest if on master + # Use tags computed before tags: | ${{ steps.docker_meta.outputs.tags }} - ${{ github.ref == 'refs/heads/main' && 'inseefrlab/s3-operator:latest' || '' }} labels: ${{ steps.docker_meta.outputs.labels }} platforms: linux/amd64,linux/arm64 - name: Image digest diff --git a/.github/workflows/ci-go.yaml b/.github/workflows/ci-go.yaml new file mode 100644 index 0000000..322420d --- /dev/null +++ b/.github/workflows/ci-go.yaml @@ -0,0 +1,41 @@ +name: Golang CI + +on: + push: + branches: + - "**" + paths-ignore: + - 'docs/**' + tags: + - "*" + pull_request: + paths-ignore: + - 'docs/**' + +jobs: + + go: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '1.23.x' + - name: Install dependencies + run: go mod download + - name: Test with Go + run: go test -v ./... -coverprofile cover.out + - name: Upload Go test results + uses: actions/upload-artifact@v4 + with: + name: coverage + path: cover.out + - name: Build + run: go build -v ./... 
+ - name: 'Upload Artifact' + uses: actions/upload-artifact@v4 + with: + name: manager + path: ./bin/manager diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 0000000..060d775 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,40 @@ +on: + push: + # Sequence of patterns matched against refs/tags + tags: + - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10 + +name: Upload Release Asset + +jobs: + upload-release-assets: + name: Upload Release Asset + needs: build-go + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Download a single artifact + uses: actions/download-artifact@v4 + with: + name: manager + - name: Create Release + id: create_release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ github.ref }} + release_name: Release ${{ github.ref }} + draft: false + prerelease: false + - name: Upload Release Asset + id: upload-release-asset + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps + asset_path: bin/manager + asset_name: s3-operator + asset_content_type: application/x-executable \ No newline at end of file diff --git a/.gitignore b/.gitignore index 8ab387a..8429532 100644 --- a/.gitignore +++ b/.gitignore @@ -24,4 +24,6 @@ Dockerfile.cross *.swp *.swo *~ -.vscode \ No newline at end of file +.vscode + +values-local.yaml \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 31c5110..7f91fc3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # Build the manager binary -FROM golang:1.22 as builder +FROM golang:1.23 as builder ARG TARGETOS ARG TARGETARCH @@ -12,16 +12,17 @@ COPY go.sum go.sum RUN go mod download # Copy the go source -COPY main.go main.go +COPY cmd/main.go cmd/main.go COPY api/ api/ -COPY controllers/ controllers/ +COPY internal/ internal/ + # Build # the GOARCH has not a default value to allow the binary be built according to the host where the command # was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO # the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, # by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. -RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager main.go +RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details diff --git a/LICENSE b/LICENSE index 72e15a7..8cd9edd 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,13 @@ -MIT License +Copyright 2023. -Copyright (c) 2023 InseeFrLab +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + http://www.apache.org/licenses/LICENSE-2.0 -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/Makefile b/Makefile index 5eb7576..689d46a 100644 --- a/Makefile +++ b/Makefile @@ -48,12 +48,12 @@ endif # Set the Operator SDK version to use. By default, what is installed on the system is used. # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v1.32.0 +OPERATOR_SDK_VERSION ?= v1.39.1 # Image URL to use all building/pushing image targets IMG ?= controller:latest # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. -ENVTEST_K8S_VERSION = 1.26.0 +ENVTEST_K8S_VERSION = 1.32.0 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -113,11 +113,11 @@ test: manifests generate fmt vet envtest ## Run tests. .PHONY: build build: manifests generate fmt vet ## Build manager binary. - go build -o bin/manager main.go + go build -o bin/manager cmd/main.go .PHONY: run run: manifests generate fmt vet ## Run a controller from your host. - go run ./main.go + go run ./cmd/main.go # If you wish built the manager image targeting other platforms you can use the --platform flag. # (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it. @@ -184,7 +184,7 @@ ENVTEST ?= $(LOCALBIN)/setup-envtest ## Tool Versions KUSTOMIZE_VERSION ?= v3.8.7 -CONTROLLER_TOOLS_VERSION ?= v0.14.0 +CONTROLLER_TOOLS_VERSION ?= v0.17.1 KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" .PHONY: kustomize @@ -279,3 +279,7 @@ catalog-build: opm ## Build a catalog image. .PHONY: catalog-push catalog-push: ## Push a catalog image. $(MAKE) docker-push IMG=$(CATALOG_IMG) + +.PHONY: go-unittest +go-unittest: ## Build the bundle image. + go test -v ./... 
diff --git a/PROJECT b/PROJECT index 4fefe51..7ed69c8 100644 --- a/PROJECT +++ b/PROJECT @@ -4,7 +4,7 @@ # More info: https://book.kubebuilder.io/reference/project-config.html domain: onyxia.sh layout: -- go.kubebuilder.io/v3 +- go.kubebuilder.io/v4 plugins: manifests.sdk.operatorframework.io/v2: {} scorecard.sdk.operatorframework.io/v2: {} @@ -38,4 +38,22 @@ resources: kind: Path path: github.com/InseeFrLab/s3-operator/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: onyxia.sh + group: s3.onyxia.sh + kind: S3instance + path: github.com/InseeFrLab/s3-operator/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: onyxia.sh + group: s3.onyxia.sh + kind: S3User + path: github.com/InseeFrLab/s3-operator/api/v1alpha1 + version: v1alpha1 version: "3" diff --git a/README.md b/README.md index 9b1ba50..185ecbd 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ This Operator SDK based tool aims at managing S3 related resources (buckets, pol ## At a glance -- Current S3 providers : [Minio](https://github.com/InseeFrLab/s3-operator/blob/main/controllers/s3/factory/minioS3Client.go) +- Current S3 providers : [Minio](https://github.com/InseeFrLab/s3-operator/blob/main/internal/s3/factory/minioS3Client.go) - Currently managed S3 resources : [buckets](https://github.com/InseeFrLab/s3-operator/blob/main/api/v1alpha1/bucket_types.go), [policies](https://github.com/InseeFrLab/s3-operator/blob/main/api/v1alpha1/policy_types.go) ## Compatibility @@ -21,7 +21,8 @@ At its heart, the operator revolves around CRDs that match S3 resources : - `buckets.s3.onyxia.sh` - `policies.s3.onyxia.sh` - `paths.s3.onyxia.sh` -- `users.s3.onyxia.sh` +- `s3Users.s3.onyxia.sh` +- `s3Instances.s3.onyxia.sh` The custom resources based on these CRDs are a somewhat simplified projection of the real S3 resources. From the operator's point of view : @@ -29,6 +30,7 @@ The custom resources based on these CRDs are a somewhat simplified projection of - A `Policy` CR matches a "canned" policy (not a bucket policy, but a global one, that can be attached to a user), and has a name, and its actual content (IAM JSON) - A `Path` CR matches a set of paths inside of a policy. This is akin to the `paths` property of the `Bucket` CRD, except `Path` is not responsible for Bucket creation. - A `S3User` CR matches a user in the s3 server, and has a name, a set of policy and a set of group. +- A `S3Instance` CR matches a s3Instance. Each custom resource based on these CRDs on Kubernetes is to be matched with a resource on the S3 instance. If the CR and the corresponding S3 resource diverge, the operator will create or update the S3 resource to bring it back to. @@ -72,25 +74,12 @@ The operator exposes a few parameters, meant to be set as arguments, though it's The parameters are summarized in the table below : -| Flag name | Default | Environment variable | Multiple values allowed | Description | -| ------------------------------- | ---------------- | -------------------- | ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | -| `health-probe-bind-address` | `:8081` | - | no | The address the probe endpoint binds to. Comes from Operator SDK. | -| `leader-elect` | `false` | - | no | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. Comes from Operator SDK. 
| -| `metrics-bind-address` | `:8080` | - | no | The address the metric endpoint binds to. Comes from Operator SDK. | -| `region` | `us-east-1` | - | no | The region to configure for the S3 client. | -| `s3-access-key` | - | `S3_ACCESS_KEY` | no | The access key used to interact with the S3 server. | -| `s3-ca-certificate-base64` | - | - | yes | (Optional) Base64 encoded, PEM format CA certificate, for https requests to the S3 server. | -| `s3-ca-certificate-bundle-path` | - | - | no | (Optional) Path to a CA certificates bundle file, for https requests to the S3 server. | -| `s3-endpoint-url` | `localhost:9000` | - | no | Hostname (or hostname:port) of the S3 server. | -| `s3-provider` | `minio` | - | no | S3 provider (possible values : `minio`, `mockedS3Provider`) | -| `s3-secret-key` | - | `S3_SECRET_KEY` | no | The secret key used to interact with the S3 server. | -| `useSsl` | true | - | no | Use of SSL/TLS to connect to the S3 server | -| `bucket-deletion` | false | - | no | Trigger bucket deletion on the S3 backend upon CR deletion. Will fail if bucket is not empty. | -| `policy-deletion` | false | - | no | Trigger policy deletion on the S3 backend upon CR deletion | -| `path-deletion` | false | - | no | Trigger path deletion on the S3 backend upon CR deletion. Limited to deleting the `.keep` files used by the operator. | -| `s3User-deletion` | false | - | no | Trigger S3User deletion on the S3 backend upon CR deletion. | -| `override-existing-secret` | false | - | no | Update secret linked to s3User if already exist, else noop | - +| Flag name | Default | Environment variable | Multiple values allowed | Description | +| --------------------------- | ------- | -------------------- | ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | +| `health-probe-bind-address` | `:8081` | - | no | The address the probe endpoint binds to. Comes from Operator SDK. | +| `leader-elect` | `false` | - | no | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. Comes from Operator SDK. | +| `metrics-bind-address` | `:8080` | - | no | The address the metric endpoint binds to. Comes from Operator SDK. | | +| `override-existing-secret` | false | - | no | Update secret linked to s3User if already exist, else noop | ## Minimal rights needed to work The Operator need at least this rights: @@ -147,6 +136,34 @@ The Operator need at least this rights: - The same will happen if you modify a CR - the operator will adjust the S3 bucket or policy accordingly - with the notable exception that it will not delete paths for buckets. - Upon deleting a CR, the corresponding bucket or policy will be left as is, as mentioned in the [*Description* section above](#description) +An instance of S3Operator can manage multiple S3. On each resource created you can set where to create it. To add multiple instance of S3 see S3Instance example. On each object deployed you can attach it to an existing s3Instance. If no instance is set on the resource, S3Operator will failback to default instance configured by env var. 
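The S3Instance example below references a credentials Secret through `secretRef`; per the example's comments, that Secret carries the keys `S3_ACCESS_KEY` and `S3_SECRET_KEY` (and an optional CA Secret carries `ca.crt`). A minimal sketch of such a credentials Secret, assuming illustrative names and placeholder values that are not part of this patch:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: minio-credentials   # referenced by spec.secretRef in the S3Instance example below
  namespace: s3-operator    # illustrative namespace
type: Opaque
stringData:
  S3_ACCESS_KEY: ROOTNAME      # placeholder access key
  S3_SECRET_KEY: CHANGEME123   # placeholder secret key
```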
+
+### S3Instance example
+
+```yaml
+apiVersion: s3.onyxia.sh/v1alpha1
+kind: S3Instance
+metadata:
+  labels:
+    app.kubernetes.io/name: bucket
+    app.kubernetes.io/instance: bucket-sample
+    app.kubernetes.io/part-of: s3-operator
+    app.kubernetes.io/managed-by: kustomize
+    app.kubernetes.io/created-by: s3-operator
+  name: s3-default-instance # Name of the S3Instance
+spec:
+  s3Provider: minio # Type of the provider. Can be "mockedS3Provider" or "minio"
+  url: https://minio.example.com # URL of the provider
+  secretRef: minio-credentials # Name of the secret containing the 2 keys S3_ACCESS_KEY and S3_SECRET_KEY
+  caCertSecretRef: minio-certs # Name of the secret containing the key ca.crt with the certificate of the S3 provider
+  region: us-east-1 # Region of the provider
+  allowedNamespaces: [] # Namespaces allowed to create buckets, policies, ... on this instance. Wildcard prefix/suffix allowed. If empty, only the namespace of the S3Instance is allowed
+  bucketDeletionEnabled: true # Allow bucket deletion on the S3 backend upon CR deletion
+  policyDeletionEnabled: true # Allow policy deletion on the S3 backend upon CR deletion
+  pathDeletionEnabled: true # Allow path deletion on the S3 backend upon CR deletion
+  s3UserDeletionEnabled: true # Allow S3User deletion on the S3 backend upon CR deletion
+```
+
 ### Bucket example
 
 ```yaml
@@ -182,6 +199,10 @@ spec:
   quota:
     default: 10000000
     # override: 20000000
+
+  # Optional, leave empty if you have configured a default S3 instance, otherwise reference an existing S3Instance
+  s3InstanceRef: "s3-default-instance"
+
 ```
 
 
@@ -202,10 +223,13 @@ spec:
   # Policy name (on S3 server, as opposed to the name of the CR)
   name: dummy-policy
 
+  # Optional, leave empty if you have configured a default S3 instance, otherwise reference an existing S3Instance
+  s3InstanceRef: "s3-default-instance"
+
   # Content of the policy, as a multiline string
   # This should be IAM compliant JSON - follow the guidelines of the actual
   # S3 provider you're using, as sometimes only a subset is available.
   # The first Statement (Allow ListBucket) should be applied to every user,
   # as s3-operator uses this call to verify that credentials are valid when
   # reconciling an existing user.
   policyContent: >-
@@ -257,6 +281,8 @@ spec:
     - /home/alice
     - /home/bob
 
+  # Optional, leave empty if you have configured a default S3 instance, otherwise reference an existing S3Instance
+  s3InstanceRef: "s3-default-instance"
 ```
 
 
@@ -278,11 +304,20 @@ spec:
   policies:
     - policy-example1
     - policy-example2
+  # Optional, leave empty if you have configured a default S3 instance, otherwise reference an existing S3Instance
+  s3InstanceRef: "s3-default-instance"
 ```
 
 Each S3user is linked to a kubernetes secret which have the same name that the S3User. The secret contains 2 keys: `accessKey` and `secretKey`.
 
+### :information_source: How s3InstanceRef works
+
+`s3InstanceRef` can take the following values:
+- empty: the default S3Instance configured at startup is used, provided the resource's namespace is in the namespaces allowed for that S3Instance
+- `s3InstanceName`: the S3Instance named `s3InstanceName` in the current namespace is used (if the current namespace is allowed)
+- `namespace/s3InstanceName`: the S3Instance named `s3InstanceName` in the namespace `namespace` is used (if the current namespace is allowed to use this S3Instance)
+
 ## Operator SDK generated guidelines
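Before the API type changes below, a quick illustration of the three `s3InstanceRef` forms described in the README section above. This is a sketch with illustrative resource names; the quota value is copied from the Bucket example, and the same field exists on `Policy`, `Path` and `S3User`:

```yaml
# Form 1 (no s3InstanceRef): the operator falls back to the default S3Instance configured at startup
apiVersion: s3.onyxia.sh/v1alpha1
kind: Bucket
metadata:
  name: bucket-default
spec:
  name: bucket-default
  quota:
    default: 10000000
---
# Form 2 (name only): S3Instance "s3-default-instance" is looked up in the Bucket's own namespace
apiVersion: s3.onyxia.sh/v1alpha1
kind: Bucket
metadata:
  name: bucket-local-ref
spec:
  name: bucket-local-ref
  s3InstanceRef: "s3-default-instance"
  quota:
    default: 10000000
---
# Form 3 (namespace/name): S3Instance "s3-default-instance" is looked up in the "s3-operator" namespace
apiVersion: s3.onyxia.sh/v1alpha1
kind: Bucket
metadata:
  name: bucket-cross-namespace-ref
spec:
  name: bucket-cross-namespace-ref
  s3InstanceRef: "s3-operator/s3-default-instance"
  quota:
    default: 10000000
```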
diff --git a/api/v1alpha1/bucket_types.go b/api/v1alpha1/bucket_types.go index af600d3..578372f 100644 --- a/api/v1alpha1/bucket_types.go +++ b/api/v1alpha1/bucket_types.go @@ -36,6 +36,14 @@ type BucketSpec struct { // +kubebuilder:validation:Optional Paths []string `json:"paths,omitempty"` + // s3InstanceRef where create the bucket + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=127 + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="s3InstanceRef is immutable" + // +kubebuilder:default=s3-operator/default + S3InstanceRef string `json:"s3InstanceRef"` + // Quota to apply to the bucket // +kubebuilder:validation:Required Quota Quota `json:"quota"` @@ -64,15 +72,11 @@ type Bucket struct { // BucketList contains a list of Bucket type BucketList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` + metav1.TypeMeta ` json:",inline"` + metav1.ListMeta ` json:"metadata,omitempty"` Items []Bucket `json:"items"` } -func init() { - SchemeBuilder.Register(&Bucket{}, &BucketList{}) -} - type Quota struct { // Default quota to apply, mandatory // +kubebuilder:validation:Required @@ -82,3 +86,7 @@ type Quota struct { // +kubebuilder:validation:Optional Override int64 `json:"override,omitempty"` } + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/api/v1alpha1/path_types.go b/api/v1alpha1/path_types.go index 58f5aad..1f69c86 100644 --- a/api/v1alpha1/path_types.go +++ b/api/v1alpha1/path_types.go @@ -35,6 +35,14 @@ type PathSpec struct { // Paths (folders) to create inside the bucket // +kubebuilder:validation:Optional Paths []string `json:"paths,omitempty"` + + // s3InstanceRef where create the Paths + // +kubebuilder:default=s3-operator/default + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="s3InstanceRef is immutable" + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=127 + S3InstanceRef string `json:"s3InstanceRef,omitempty"` } // PathStatus defines the observed state of Path @@ -60,8 +68,8 @@ type Path struct { // PathList contains a list of Path type PathList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` + metav1.TypeMeta ` json:",inline"` + metav1.ListMeta ` json:"metadata,omitempty"` Items []Path `json:"items"` } diff --git a/api/v1alpha1/policy_types.go b/api/v1alpha1/policy_types.go index 221fa4a..17078af 100644 --- a/api/v1alpha1/policy_types.go +++ b/api/v1alpha1/policy_types.go @@ -35,6 +35,14 @@ type PolicySpec struct { // +kubebuilder:validation:Required // Content of the policy (IAM JSON format) PolicyContent string `json:"policyContent"` + + // s3InstanceRef where create the Policy + // +kubebuilder:default=s3-operator/default + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="s3InstanceRef is immutable" + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=127 + S3InstanceRef string `json:"s3InstanceRef,omitempty"` } // PolicyStatus defines the observed state of Policy @@ -60,8 +68,8 @@ type Policy struct { // PolicyList contains a list of Policy type PolicyList struct { - 
metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` + metav1.TypeMeta ` json:",inline"` + metav1.ListMeta ` json:"metadata,omitempty"` Items []Policy `json:"items"` } diff --git a/api/v1alpha1/s3instance_types.go b/api/v1alpha1/s3instance_types.go new file mode 100644 index 0000000..fa252d7 --- /dev/null +++ b/api/v1alpha1/s3instance_types.go @@ -0,0 +1,103 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// S3InstanceSpec defines the desired state of S3Instance +type S3InstanceSpec struct { + + // type of the S3Instance + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="S3Provider is immutable" + // +kubebuilder:default=minio + // +kubebuilder:validation:Enum=minio;mockedS3Provider + S3Provider string `json:"s3Provider,omitempty"` + + // url of the S3Instance + // +kubebuilder:validation:Required + Url string `json:"url"` + + // Ref to Secret associated to the S3Instance containing accessKey and secretKey + // +kubebuilder:validation:Required + SecretRef string `json:"secretRef"` + + // region associated to the S3Instance + // +kubebuilder:validation:Optional + Region string `json:"region,omitempty"` + + // Secret containing key ca.crt with the certificate associated to the S3InstanceUrl + // +kubebuilder:validation:Optional + CaCertSecretRef string `json:"caCertSecretRef,omitempty"` + + // AllowedNamespaces to use this S3InstanceUrl if empty only the namespace of this instance url is allowed to use it + // +kubebuilder:validation:Optional + AllowedNamespaces []string `json:"allowedNamespaces,omitempty"` + + // BucketDeletionEnabled Trigger bucket deletion on the S3 backend upon CR deletion. Will fail if bucket is not empty. + // +kubebuilder:default=false + BucketDeletionEnabled bool `json:"bucketDeletionEnabled,omitempty"` + + // PolicyDeletionEnabled Trigger policy deletion on the S3 backend upon CR deletion. + // +kubebuilder:default=false + PolicyDeletionEnabled bool `json:"policyDeletionEnabled,omitempty"` + + // PathDeletionEnabled Trigger path deletion on the S3 backend upon CR deletion. Limited to deleting the `.keep` files used by the operator. + // +kubebuilder:default=false + PathDeletionEnabled bool `json:"pathDeletionEnabled,omitempty"` + + // S3UserDeletionEnabled Trigger S3 deletion on the S3 backend upon CR deletion. + // +kubebuilder:default=false + S3UserDeletionEnabled bool `json:"s3UserDeletionEnabled,omitempty"` +} + +// S3InstanceStatus defines the observed state of S3Instance +type S3InstanceStatus struct { + // Status management using Conditions. 
+ // See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// S3Instance is the Schema for the S3Instances API +type S3Instance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec S3InstanceSpec `json:"spec,omitempty"` + Status S3InstanceStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// S3InstanceList contains a list of S3Instance +type S3InstanceList struct { + metav1.TypeMeta ` json:",inline"` + metav1.ListMeta ` json:"metadata,omitempty"` + Items []S3Instance `json:"items"` +} + +func init() { + SchemeBuilder.Register(&S3Instance{}, &S3InstanceList{}) +} diff --git a/api/v1alpha1/s3user_types.go b/api/v1alpha1/s3user_types.go index 7804d8a..bb361c6 100644 --- a/api/v1alpha1/s3user_types.go +++ b/api/v1alpha1/s3user_types.go @@ -38,6 +38,14 @@ type S3UserSpec struct { // +kubebuilder:validation:Optional SecretName string `json:"secretName"` + // s3InstanceRef where create the user + // +kubebuilder:default=s3-operator/default + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="s3InstanceRef is immutable" + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=127 + S3InstanceRef string `json:"s3InstanceRef,omitempty"` + // SecretFieldNameAccessKey associated to the S3User // Allow overridden the default key to store the accessKey value in the secret // +kubebuilder:validation:Optional @@ -76,8 +84,8 @@ type S3User struct { // S3UserList contains a list of S3User type S3UserList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` + metav1.TypeMeta ` json:",inline"` + metav1.ListMeta ` json:"metadata,omitempty"` Items []S3User `json:"items"` } diff --git a/api/v1alpha1/types.go b/api/v1alpha1/types.go new file mode 100644 index 0000000..8a44415 --- /dev/null +++ b/api/v1alpha1/types.go @@ -0,0 +1,17 @@ +package v1alpha1 + +// Definitions to manage status condition types +const ( + // ConditionReconciled represents the status of the resource reconciliation + ConditionReconciled = "Reconciled" +) + +// Definitions to manage status condition reasons +const ( + Reconciling = "Reconciling" + Unreachable = "Unreachable" + CreationFailure = "CreationFailure" + Reconciled = "Reconciled" + DeletionFailure = "DeletionFailure" + DeletionBlocked = "DeletionBlocked" +) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index ee470a7..f191f44 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -339,6 +339,107 @@ func (in *Quota) DeepCopy() *Quota { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Instance) DeepCopyInto(out *S3Instance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Instance. 
+func (in *S3Instance) DeepCopy() *S3Instance { + if in == nil { + return nil + } + out := new(S3Instance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *S3Instance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InstanceList) DeepCopyInto(out *S3InstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]S3Instance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceList. +func (in *S3InstanceList) DeepCopy() *S3InstanceList { + if in == nil { + return nil + } + out := new(S3InstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *S3InstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InstanceSpec) DeepCopyInto(out *S3InstanceSpec) { + *out = *in + if in.AllowedNamespaces != nil { + in, out := &in.AllowedNamespaces, &out.AllowedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceSpec. +func (in *S3InstanceSpec) DeepCopy() *S3InstanceSpec { + if in == nil { + return nil + } + out := new(S3InstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InstanceStatus) DeepCopyInto(out *S3InstanceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceStatus. +func (in *S3InstanceStatus) DeepCopy() *S3InstanceStatus { + if in == nil { + return nil + } + out := new(S3InstanceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *S3User) DeepCopyInto(out *S3User) { *out = *in diff --git a/main.go b/cmd/main.go similarity index 54% rename from main.go rename to cmd/main.go index 9415abf..37c86fb 100644 --- a/main.go +++ b/cmd/main.go @@ -20,14 +20,21 @@ import ( "flag" "fmt" "os" + "time" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. 
_ "k8s.io/client-go/plugin/pkg/client/auth" s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - controllers "github.com/InseeFrLab/s3-operator/controllers" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" + bucketControllers "github.com/InseeFrLab/s3-operator/internal/controller/bucket" + pathControllers "github.com/InseeFrLab/s3-operator/internal/controller/path" + policyControllers "github.com/InseeFrLab/s3-operator/internal/controller/policy" + s3InstanceControllers "github.com/InseeFrLab/s3-operator/internal/controller/s3instance" + userControllers "github.com/InseeFrLab/s3-operator/internal/controller/user" + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory/impl" + "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -59,7 +66,6 @@ func (flags *ArrayFlags) Set(value string) error { func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(s3v1alpha1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } @@ -70,47 +76,42 @@ func main() { var probeAddr string // S3 related variables - var s3EndpointUrl string - var accessKey string - var secretKey string - var region string - var s3Provider string - var useSsl bool - var caCertificatesBase64 ArrayFlags - var caCertificatesBundlePath string - var bucketDeletion bool - var policyDeletion bool - var pathDeletion bool - var s3userDeletion bool + var reconcilePeriod time.Duration //K8S related variable var overrideExistingSecret bool - flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") - flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.StringVar( + &metricsAddr, + "metrics-bind-address", + ":8080", + "The address the metric endpoint binds to.", + ) + flag.StringVar( + &probeAddr, + "health-probe-bind-address", + ":8081", + "The address the probe endpoint binds to.", + ) flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. "+ "Enabling this will ensure there is only one active controller manager.") + flag.DurationVar(&reconcilePeriod, "reconcile-period", 0, + "Default reconcile period for controllers. Zero to disable periodic reconciliation") // S3 related flags - flag.StringVar(&s3Provider, "s3-provider", "minio", "S3 provider (possible values : minio, mockedS3Provider)") - flag.StringVar(&s3EndpointUrl, "s3-endpoint-url", "localhost:9000", "Hostname (or hostname:port) of the S3 server") - flag.StringVar(&accessKey, "s3-access-key", "ROOTNAME", "The accessKey of the acount") - flag.StringVar(&secretKey, "s3-secret-key", "CHANGEME123", "The secretKey of the acount") - flag.Var(&caCertificatesBase64, "s3-ca-certificate-base64", "(Optional) Base64 encoded, PEM format certificate file for a certificate authority, for https requests to S3") - flag.StringVar(&caCertificatesBundlePath, "s3-ca-certificate-bundle-path", "", "(Optional) Path to a CA certificate file, for https requests to S3") - flag.StringVar(®ion, "region", "us-east-1", "The region to configure for the S3 client") - flag.BoolVar(&useSsl, "useSsl", true, "Use of SSL/TLS to connect to the S3 endpoint") - flag.BoolVar(&bucketDeletion, "bucket-deletion", false, "Trigger bucket deletion on the S3 backend upon CR deletion. 
Will fail if bucket is not empty.") - flag.BoolVar(&policyDeletion, "policy-deletion", false, "Trigger policy deletion on the S3 backend upon CR deletion") - flag.BoolVar(&pathDeletion, "path-deletion", false, "Trigger path deletion on the S3 backend upon CR deletion. Limited to deleting the `.keep` files used by the operator.") - flag.BoolVar(&s3userDeletion, "s3user-deletion", false, "Trigger S3 deletion on the S3 backend upon CR deletion") - flag.BoolVar(&overrideExistingSecret, "override-existing-secret", false, "Override existing secret associated to user in case of the secret already exist") + flag.BoolVar( + &overrideExistingSecret, + "override-existing-secret", + false, + "Override existing secret associated to user in case of the secret already exist", + ) opts := zap.Options{ Development: true, TimeEncoder: zapcore.ISO8601TimeEncoder, } + opts.BindFlags(flag.CommandLine) flag.Parse() @@ -121,6 +122,10 @@ func main() { BindAddress: metricsAddr, } + s3Factory := s3factory.NewS3Factory() + s3InstanceHelper := helpers.NewS3InstanceHelper() + controllerHelper := helpers.NewControllerHelper() + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, Metrics: serverOption, @@ -150,65 +155,64 @@ func main() { os.Exit(1) } - // For S3 access key and secret key, we first try to read the values from environment variables. - // Only if these are not defined do we use the respective flags. - var accessKeyFromEnvIfAvailable = os.Getenv("S3_ACCESS_KEY") - if accessKeyFromEnvIfAvailable == "" { - accessKeyFromEnvIfAvailable = accessKey - } - var secretKeyFromEnvIfAvailable = os.Getenv("S3_SECRET_KEY") - if secretKeyFromEnvIfAvailable == "" { - secretKeyFromEnvIfAvailable = secretKey - } - - // Creation of the S3 client - s3Config := &factory.S3Config{S3Provider: s3Provider, S3UrlEndpoint: s3EndpointUrl, Region: region, AccessKey: accessKeyFromEnvIfAvailable, SecretKey: secretKeyFromEnvIfAvailable, UseSsl: useSsl, CaCertificatesBase64: caCertificatesBase64, CaBundlePath: caCertificatesBundlePath} - s3Client, err := factory.GetS3Client(s3Config.S3Provider, s3Config) - if err != nil { - // setupLog.Log.Error(err, err.Error()) - // fmt.Print(s3Client) - // fmt.Print(err) - setupLog.Error(err, "an error occurred while creating the S3 client", "s3Client", s3Client) + if err = (&s3InstanceControllers.S3InstanceReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ReconcilePeriod: reconcilePeriod, + S3factory: s3Factory, + ControllerHelper: controllerHelper, + S3Instancehelper: s3InstanceHelper, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "S3Instance") os.Exit(1) } - - if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - S3Client: s3Client, - BucketDeletion: bucketDeletion, + if err = (&bucketControllers.BucketReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ReconcilePeriod: reconcilePeriod, + S3factory: s3Factory, + ControllerHelper: controllerHelper, + S3Instancehelper: s3InstanceHelper, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Bucket") os.Exit(1) } - if err = (&controllers.PathReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - S3Client: s3Client, - PathDeletion: pathDeletion, + if err = (&pathControllers.PathReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ReconcilePeriod: reconcilePeriod, + S3factory: s3Factory, + ControllerHelper: 
controllerHelper, + S3Instancehelper: s3InstanceHelper, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Path") os.Exit(1) } - if err = (&controllers.PolicyReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - S3Client: s3Client, - PolicyDeletion: policyDeletion, + if err = (&policyControllers.PolicyReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ReconcilePeriod: reconcilePeriod, + S3factory: s3Factory, + ControllerHelper: controllerHelper, + S3Instancehelper: s3InstanceHelper, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Policy") os.Exit(1) } - if err = (&controllers.S3UserReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - S3Client: s3Client, - S3UserDeletion: s3userDeletion, - OverrideExistingSecret: overrideExistingSecret, + if err = (&userControllers.S3UserReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + OverrideExistingSecret: overrideExistingSecret, + ReconcilePeriod: reconcilePeriod, + S3factory: s3Factory, + ControllerHelper: controllerHelper, + S3Instancehelper: s3InstanceHelper, + PasswordGeneratorHelper: helpers.NewPasswordGenerator(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "S3User") os.Exit(1) } + //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/s3.onyxia.sh_buckets.yaml b/config/crd/bases/s3.onyxia.sh_buckets.yaml index fafe6f1..4d46e44 100644 --- a/config/crd/bases/s3.onyxia.sh_buckets.yaml +++ b/config/crd/bases/s3.onyxia.sh_buckets.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.17.1 name: buckets.s3.onyxia.sh spec: group: s3.onyxia.sh @@ -61,9 +61,20 @@ spec: required: - default type: object + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the bucket + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf required: - name - quota + - s3InstanceRef type: object status: description: BucketStatus defines the observed state of Bucket @@ -73,16 +84,8 @@ spec: Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. 
properties: lastTransitionTime: description: |- @@ -123,12 +126,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/s3.onyxia.sh_paths.yaml b/config/crd/bases/s3.onyxia.sh_paths.yaml index c881d1a..67fd1c5 100644 --- a/config/crd/bases/s3.onyxia.sh_paths.yaml +++ b/config/crd/bases/s3.onyxia.sh_paths.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.17.1 name: paths.s3.onyxia.sh spec: group: s3.onyxia.sh @@ -47,6 +47,16 @@ spec: items: type: string type: array + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the Paths + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf required: - bucketName type: object @@ -58,16 +68,8 @@ spec: Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -108,12 +110,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/s3.onyxia.sh_policies.yaml b/config/crd/bases/s3.onyxia.sh_policies.yaml index 080f641..ce5f62e 100644 --- a/config/crd/bases/s3.onyxia.sh_policies.yaml +++ b/config/crd/bases/s3.onyxia.sh_policies.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.17.1 name: policies.s3.onyxia.sh spec: group: s3.onyxia.sh @@ -45,6 +45,16 @@ spec: policyContent: description: Content of the policy (IAM JSON format) type: string + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the Policy + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf required: - name - policyContent @@ -57,16 +67,8 @@ spec: Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -107,12 +109,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. 
maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/s3.onyxia.sh_s3instances.yaml b/config/crd/bases/s3.onyxia.sh_s3instances.yaml new file mode 100644 index 0000000..8cf429d --- /dev/null +++ b/config/crd/bases/s3.onyxia.sh_s3instances.yaml @@ -0,0 +1,165 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: s3instances.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: S3Instance + listKind: S3InstanceList + plural: s3instances + singular: s3instance + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: S3Instance is the Schema for the S3Instances API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: S3InstanceSpec defines the desired state of S3Instance + properties: + allowedNamespaces: + description: AllowedNamespaces to use this S3InstanceUrl if empty + only the namespace of this instance url is allowed to use it + items: + type: string + type: array + bucketDeletionEnabled: + default: false + description: BucketDeletionEnabled Trigger bucket deletion on the + S3 backend upon CR deletion. Will fail if bucket is not empty. + type: boolean + caCertSecretRef: + description: Secret containing key ca.crt with the certificate associated + to the S3InstanceUrl + type: string + pathDeletionEnabled: + default: false + description: PathDeletionEnabled Trigger path deletion on the S3 backend + upon CR deletion. Limited to deleting the `.keep` files used by + the operator. + type: boolean + policyDeletionEnabled: + default: false + description: PolicyDeletionEnabled Trigger policy deletion on the + S3 backend upon CR deletion. + type: boolean + region: + description: region associated to the S3Instance + type: string + s3Provider: + default: minio + description: type of the S3Instance + enum: + - minio + - mockedS3Provider + type: string + x-kubernetes-validations: + - message: S3Provider is immutable + rule: self == oldSelf + s3UserDeletionEnabled: + default: false + description: S3UserDeletionEnabled Trigger S3 deletion on the S3 backend + upon CR deletion. + type: boolean + secretRef: + description: Ref to Secret associated to the S3Instance containing + accessKey and secretKey + type: string + url: + description: url of the S3Instance + type: string + required: + - s3Provider + - secretRef + - url + type: object + status: + description: S3InstanceStatus defines the observed state of S3Instance + properties: + conditions: + description: |- + Status management using Conditions. 
+ See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/s3.onyxia.sh_s3users.yaml b/config/crd/bases/s3.onyxia.sh_s3users.yaml index cee63b7..5dffc6d 100644 --- a/config/crd/bases/s3.onyxia.sh_s3users.yaml +++ b/config/crd/bases/s3.onyxia.sh_s3users.yaml @@ -3,7 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.14.0 + controller-gen.kubebuilder.io/version: v0.17.1 name: s3users.s3.onyxia.sh spec: group: s3.onyxia.sh @@ -47,6 +47,16 @@ spec: items: type: string type: array + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the user + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf secretFieldNameAccessKey: default: accessKey description: |- @@ -73,16 +83,8 @@ spec: Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties items: - description: "Condition contains details for one aspect of the current - state of this API Resource.\n---\nThis struct is intended for - direct use as an array at the field path .status.conditions. 
For - example,\n\n\n\ttype FooStatus struct{\n\t // Represents the - observations of a foo's current state.\n\t // Known .status.conditions.type - are: \"Available\", \"Progressing\", and \"Degraded\"\n\t // - +patchMergeKey=type\n\t // +patchStrategy=merge\n\t // +listType=map\n\t - \ // +listMapKey=type\n\t Conditions []metav1.Condition `json:\"conditions,omitempty\" - patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"`\n\n\n\t - \ // other fields\n\t}" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: description: |- @@ -123,12 +125,7 @@ spec: - Unknown type: string type: - description: |- - type of condition in CamelCase or in foo.example.com/CamelCase. - --- - Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be - useful (see .node.status.conditions), the ability to deconflict is important. - The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + description: type of condition in CamelCase or in foo.example.com/CamelCase. maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 2550526..e7383eb 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -5,27 +5,26 @@ metadata: name: manager-role rules: - apiGroups: - - s3.onyxia.sh + - "" resources: - - S3User + - secrets verbs: - create - delete - get - list - - patch - update - watch - apiGroups: - - s3.onyxia.sh + - "" resources: - - S3User/finalizers + - secrets/finalizers verbs: - update - apiGroups: - - s3.onyxia.sh + - "" resources: - - S3User/status + - secrets/status verbs: - get - patch @@ -34,58 +33,10 @@ rules: - s3.onyxia.sh resources: - buckets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - s3.onyxia.sh - resources: - - buckets/finalizers - verbs: - - update -- apiGroups: - - s3.onyxia.sh - resources: - - buckets/status - verbs: - - get - - patch - - update -- apiGroups: - - s3.onyxia.sh - resources: - paths - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - s3.onyxia.sh - resources: - - paths/finalizers - verbs: - - update -- apiGroups: - - s3.onyxia.sh - resources: - - paths/status - verbs: - - get - - patch - - update -- apiGroups: - - s3.onyxia.sh - resources: - policies + - s3instances + - s3users verbs: - create - delete @@ -97,13 +48,21 @@ rules: - apiGroups: - s3.onyxia.sh resources: + - buckets/finalizers + - paths/finalizers - policies/finalizers + - s3instances/finalizers + - s3users/finalizers verbs: - update - apiGroups: - s3.onyxia.sh resources: + - buckets/status + - paths/status - policies/status + - s3instances/status + - s3users/status verbs: - get - patch diff --git a/config/samples/s3.onyxia.sh_v1alpha1_bucket.yaml b/config/samples/s3.onyxia.sh_v1alpha1_bucket.yaml index 4d55426..1f201b8 100644 --- a/config/samples/s3.onyxia.sh_v1alpha1_bucket.yaml +++ b/config/samples/s3.onyxia.sh_v1alpha1_bucket.yaml @@ -1,12 +1,6 @@ apiVersion: s3.onyxia.sh/v1alpha1 kind: Bucket metadata: - labels: - app.kubernetes.io/name: bucket - app.kubernetes.io/instance: bucket-sample - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: s3-operator name: bucket-sample spec: # Bucket name (on S3 server, as opposed to 
the name of the CR)
diff --git a/config/samples/s3.onyxia.sh_v1alpha1_path.yaml b/config/samples/s3.onyxia.sh_v1alpha1_path.yaml
index 86cf9af..fa4a258 100644
--- a/config/samples/s3.onyxia.sh_v1alpha1_path.yaml
+++ b/config/samples/s3.onyxia.sh_v1alpha1_path.yaml
@@ -1,12 +1,6 @@
 apiVersion: s3.onyxia.sh/v1alpha1
 kind: Path
 metadata:
-  labels:
-    app.kubernetes.io/name: path
-    app.kubernetes.io/instance: path-sample
-    app.kubernetes.io/part-of: s3-operator
-    app.kubernetes.io/managed-by: kustomize
-    app.kubernetes.io/created-by: s3-operator
   name: path-sample
 spec:
   # Bucket name (on S3 server, as opposed to the name of the CR)
diff --git a/config/samples/s3.onyxia.sh_v1alpha1_policy.yaml b/config/samples/s3.onyxia.sh_v1alpha1_policy.yaml
index bff4f74..7e1ca77 100644
--- a/config/samples/s3.onyxia.sh_v1alpha1_policy.yaml
+++ b/config/samples/s3.onyxia.sh_v1alpha1_policy.yaml
@@ -1,12 +1,6 @@
 apiVersion: s3.onyxia.sh/v1alpha1
 kind: Policy
 metadata:
-  labels:
-    app.kubernetes.io/name: policy
-    app.kubernetes.io/instance: policy-sample
-    app.kubernetes.io/part-of: s3-operator
-    app.kubernetes.io/managed-by: kustomize
-    app.kubernetes.io/created-by: s3-operator
   name: policy-sample
 spec:
   # Policy name (on S3 server, as opposed to the name of the CR)
diff --git a/config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml b/config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml
new file mode 100644
index 0000000..131a555
--- /dev/null
+++ b/config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml
@@ -0,0 +1,31 @@
+apiVersion: s3.onyxia.sh/v1alpha1
+kind: S3Instance
+metadata:
+  name: s3instance-sample
+spec:
+  s3Provider: minio
+  url: https://minio.example.com
+  secretRef: minio-credentials
+  caCertSecretRef: minio-certificates
+  # allowedNamespaces: "*" # if not set, only resources from the same namespace are allowed
+  # region: us-east-1
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: minio-credentials
+type: Opaque
+stringData:
+  S3_ACCESS_KEY: accessKey
+  S3_SECRET_KEY: secretkey
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: minio-certificates
+type: Opaque
+stringData:
+  ca.crt: |
+    -----BEGIN CERTIFICATE-----
+    ...
+    -----END CERTIFICATE-----
diff --git a/config/samples/s3.onyxia.sh_v1alpha1_s3user.yaml b/config/samples/s3.onyxia.sh_v1alpha1_s3user.yaml
index 24a6458..fc11c14 100644
--- a/config/samples/s3.onyxia.sh_v1alpha1_s3user.yaml
+++ b/config/samples/s3.onyxia.sh_v1alpha1_s3user.yaml
@@ -1,12 +1,6 @@
 apiVersion: s3.onyxia.sh/v1alpha1
 kind: S3User
 metadata:
-  labels:
-    app.kubernetes.io/name: user
-    app.kubernetes.io/instance: user-sample
-    app.kubernetes.io/part-of: s3-operator
-    app.kubernetes.io/managed-by: kustomize
-    app.kubernetes.io/created-by: s3-operator
   name: user-sample
 spec:
   accessKey: user-sample
diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go
deleted file mode 100644
index f6a88bd..0000000
--- a/controllers/bucket_controller.go
+++ /dev/null
@@ -1,267 +0,0 @@
-/*
-Copyright 2023.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/ - -package controllers - -import ( - "context" - "fmt" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - "github.com/InseeFrLab/s3-operator/controllers/utils" -) - -// BucketReconciler reconciles a Bucket object -type BucketReconciler struct { - client.Client - Scheme *runtime.Scheme - S3Client factory.S3Client - BucketDeletion bool -} - -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets/finalizers,verbs=update - -const bucketFinalizer = "s3.onyxia.sh/finalizer" - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // Checking for bucket resource existence - bucketResource := &s3v1alpha1.Bucket{} - err := r.Get(ctx, req.NamespacedName, bucketResource) - if err != nil { - if errors.IsNotFound(err) { - logger.Info("The Bucket custom resource has been removed ; as such the Bucket controller is NOOP.", "req.Name", req.Name) - return ctrl.Result{}, nil - } - logger.Error(err, "An error occurred when attempting to read the Bucket resource from the Kubernetes cluster") - return ctrl.Result{}, err - } - - // Managing bucket deletion with a finalizer - // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources - isMarkedForDeletion := bucketResource.GetDeletionTimestamp() != nil - if isMarkedForDeletion { - if controllerutil.ContainsFinalizer(bucketResource, bucketFinalizer) { - // Run finalization logic for bucketFinalizer. If the - // finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. - if err := r.finalizeBucket(bucketResource); err != nil { - // return ctrl.Result{}, err - logger.Error(err, "an error occurred when attempting to finalize the bucket", "bucket", bucketResource.Spec.Name) - // return ctrl.Result{}, err - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketFinalizeFailed", - fmt.Sprintf("An error occurred when attempting to delete bucket [%s]", bucketResource.Spec.Name), err) - } - - // Remove bucketFinalizer. Once all finalizers have been - // removed, the object will be deleted. 
- controllerutil.RemoveFinalizer(bucketResource, bucketFinalizer) - err := r.Update(ctx, bucketResource) - if err != nil { - logger.Error(err, "an error occurred when removing finalizer from bucket", "bucket", bucketResource.Spec.Name) - // return ctrl.Result{}, err - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketFinalizerRemovalFailed", - fmt.Sprintf("An error occurred when attempting to remove the finalizer from bucket [%s]", bucketResource.Spec.Name), err) - } - } - return ctrl.Result{}, nil - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(bucketResource, bucketFinalizer) { - controllerutil.AddFinalizer(bucketResource, bucketFinalizer) - err = r.Update(ctx, bucketResource) - if err != nil { - logger.Error(err, "an error occurred when adding finalizer from bucket", "bucket", bucketResource.Spec.Name) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketFinalizerAddFailed", - fmt.Sprintf("An error occurred when attempting to add the finalizer from bucket [%s]", bucketResource.Spec.Name), err) - } - } - - // Bucket lifecycle management (other than deletion) starts here - - // Check bucket existence on the S3 server - found, err := r.S3Client.BucketExists(bucketResource.Spec.Name) - if err != nil { - logger.Error(err, "an error occurred while checking the existence of a bucket", "bucket", bucketResource.Spec.Name) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketExistenceCheckFailed", - fmt.Sprintf("Checking existence of bucket [%s] from S3 instance has failed", bucketResource.Spec.Name), err) - } - - // If the bucket does not exist, it is created based on the CR (with potential quotas and paths) - if !found { - - // Bucket creation - err = r.S3Client.CreateBucket(bucketResource.Spec.Name) - if err != nil { - logger.Error(err, "an error occurred while creating a bucket", "bucket", bucketResource.Spec.Name) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketCreationFailed", - fmt.Sprintf("Creation of bucket [%s] on S3 instance has failed", bucketResource.Spec.Name), err) - } - - // Setting quotas - err = r.S3Client.SetQuota(bucketResource.Spec.Name, bucketResource.Spec.Quota.Default) - if err != nil { - logger.Error(err, "an error occurred while setting a quota on a bucket", "bucket", bucketResource.Spec.Name, "quota", bucketResource.Spec.Quota.Default) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "SetQuotaOnBucketFailed", - fmt.Sprintf("Setting a quota of [%v] on bucket [%s] has failed", bucketResource.Spec.Quota.Default, bucketResource.Spec.Name), err) - } - - // Path creation - for _, v := range bucketResource.Spec.Paths { - err = r.S3Client.CreatePath(bucketResource.Spec.Name, v) - if err != nil { - logger.Error(err, "an error occurred while creating a path on a bucket", "bucket", bucketResource.Spec.Name, "path", v) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "CreatingPathOnBucketFailed", - fmt.Sprintf("Creating the path [%s] on bucket [%s] has failed", v, bucketResource.Spec.Name), err) - } - } - - // The bucket creation, quota setting and path creation happened without any error - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorSucceeded", metav1.ConditionTrue, 
"BucketCreated", - fmt.Sprintf("The bucket [%s] was created with its quota and paths", bucketResource.Spec.Name), nil) - } - - // If the bucket exists on the S3 server, then we need to compare it to - // its corresponding custom resource, and update it in case the CR has changed. - - // Checking effectiveQuota existence on the bucket - effectiveQuota, err := r.S3Client.GetQuota(bucketResource.Spec.Name) - if err != nil { - logger.Error(err, "an error occurred while getting the quota for a bucket", "bucket", bucketResource.Spec.Name) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketQuotaCheckFailed", - fmt.Sprintf("The check for a quota on bucket [%s] has failed", bucketResource.Spec.Name), err) - } - - // If a quota exists, we check it versus the spec of the CR. In case they don't match, - // we reset the quota using the value from CR ("override" is present, "default" if not) - - // Choosing between override / default - quotaToResetTo := bucketResource.Spec.Quota.Override - if quotaToResetTo == 0 { - quotaToResetTo = bucketResource.Spec.Quota.Default - } - - if effectiveQuota != quotaToResetTo { - err = r.S3Client.SetQuota(bucketResource.Spec.Name, quotaToResetTo) - if err != nil { - logger.Error(err, "an error occurred while resetting the quota for a bucket", "bucket", bucketResource.Spec.Name, "quotaToResetTo", quotaToResetTo) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketQuotaUpdateFailed", - fmt.Sprintf("The quota update (%v => %v) on bucket [%s] has failed", effectiveQuota, quotaToResetTo, bucketResource.Spec.Name), err) - } - } - - // For every path on the custom resource's spec, we check the path actually - // exists on the bucket on the S3 server, and create it if it doesn't - // TODO ? : the way this is naively implemented, it's probably costly. Maybe - // we can get the "effectiveBucket" (with its quota and paths) once at the beginning, - // and iterate on this instead of interrogating the S3 server twice for every path. - // But then again, some buckets will likely be filled with many objects outside the - // scope of the CR, so getting all of them might be even more costly. 
- for _, pathInCr := range bucketResource.Spec.Paths { - pathExists, err := r.S3Client.PathExists(bucketResource.Spec.Name, pathInCr) - if err != nil { - logger.Error(err, "an error occurred while checking a path's existence on a bucket", "bucket", bucketResource.Spec.Name, "path", pathInCr) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketPathCheckFailed", - fmt.Sprintf("The check for path [%s] on bucket [%s] has failed", pathInCr, bucketResource.Spec.Name), err) - } - - if !pathExists { - err = r.S3Client.CreatePath(bucketResource.Spec.Name, pathInCr) - if err != nil { - logger.Error(err, "an error occurred while creating a path on a bucket", "bucket", bucketResource.Spec.Name, "path", pathInCr) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketPathCreationFailed", - fmt.Sprintf("The creation of path [%s] on bucket [%s] has failed", pathInCr, bucketResource.Spec.Name), err) - } - } - } - - // The bucket reconciliation with its CR was succesful (or NOOP) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorSucceeded", metav1.ConditionTrue, "BucketUpdated", - fmt.Sprintf("The bucket [%s] was updated according to its matching custom resource", bucketResource.Spec.Name), nil) - -} - -// SetupWithManager sets up the controller with the Manager.* -func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&s3v1alpha1.Bucket{}). - // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ - WithEventFilter(predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - // Only reconcile if generation has changed - return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Evaluates to false if the object has been confirmed deleted. - return !e.DeleteStateUnknown - }, - }). - WithOptions(controller.Options{MaxConcurrentReconciles: 10}). - Complete(r) -} - -func (r *BucketReconciler) finalizeBucket(bucketResource *s3v1alpha1.Bucket) error { - if r.BucketDeletion { - return r.S3Client.DeleteBucket(bucketResource.Spec.Name) - } - return nil -} - -func (r *BucketReconciler) SetBucketStatusConditionAndUpdate(ctx context.Context, bucketResource *s3v1alpha1.Bucket, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // We moved away from meta.SetStatusCondition, as the implementation did not allow for updating - // lastTransitionTime if a Condition (as identified by Reason instead of Type) was previously - // obtained and updated to again. 
- bucketResource.Status.Conditions = utils.UpdateConditions(bucketResource.Status.Conditions, metav1.Condition{ - Type: conditionType, - Status: status, - Reason: reason, - LastTransitionTime: metav1.NewTime(time.Now()), - Message: message, - ObservedGeneration: bucketResource.GetGeneration(), - }) - - err := r.Status().Update(ctx, bucketResource) - if err != nil { - logger.Error(err, "an error occurred while updating the status of the bucket resource") - return ctrl.Result{}, utilerrors.NewAggregate([]error{err, srcError}) - } - return ctrl.Result{}, srcError -} diff --git a/controllers/path_controller.go b/controllers/path_controller.go deleted file mode 100644 index d789d5b..0000000 --- a/controllers/path_controller.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "fmt" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - "github.com/InseeFrLab/s3-operator/controllers/utils" -) - -// PathReconciler reconciles a Path object -type PathReconciler struct { - client.Client - Scheme *runtime.Scheme - S3Client factory.S3Client - PathDeletion bool -} - -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths/finalizers,verbs=update - -const pathFinalizer = "s3.onyxia.sh/finalizer" - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *PathReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // Checking for path resource existence - pathResource := &s3v1alpha1.Path{} - err := r.Get(ctx, req.NamespacedName, pathResource) - if err != nil { - if errors.IsNotFound(err) { - logger.Info("The Path custom resource has been removed ; as such the Path controller is NOOP.", "req.Name", req.Name) - return ctrl.Result{}, nil - } - logger.Error(err, "An error occurred when attempting to read the Path resource from the Kubernetes cluster") - return ctrl.Result{}, err - } - - // Managing path deletion with a finalizer - // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources - isMarkedForDeletion := pathResource.GetDeletionTimestamp() != nil - if isMarkedForDeletion { - if controllerutil.ContainsFinalizer(pathResource, pathFinalizer) { - // Run finalization logic for pathFinalizer. If the - // finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. - if err := r.finalizePath(pathResource); err != nil { - // return ctrl.Result{}, err - logger.Error(err, "an error occurred when attempting to finalize the path", "path", pathResource.Name) - // return ctrl.Result{}, err - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathFinalizeFailed", - fmt.Sprintf("An error occurred when attempting to delete path [%s]", pathResource.Name), err) - } - - // Remove pathFinalizer. Once all finalizers have been - // removed, the object will be deleted. 
- controllerutil.RemoveFinalizer(pathResource, pathFinalizer) - err := r.Update(ctx, pathResource) - if err != nil { - logger.Error(err, "an error occurred when removing finalizer from path", "path", pathResource.Name) - // return ctrl.Result{}, err - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathFinalizerRemovalFailed", - fmt.Sprintf("An error occurred when attempting to remove the finalizer from path [%s]", pathResource.Name), err) - } - } - return ctrl.Result{}, nil - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(pathResource, pathFinalizer) { - controllerutil.AddFinalizer(pathResource, pathFinalizer) - err = r.Update(ctx, pathResource) - if err != nil { - logger.Error(err, "an error occurred when adding finalizer from path", "path", pathResource.Name) - // return ctrl.Result{}, err - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathFinalizerAddFailed", - fmt.Sprintf("An error occurred when attempting to add the finalizer from path [%s]", pathResource.Name), err) - } - } - - // Path lifecycle management (other than deletion) starts here - - // Check bucket existence on the S3 server - bucketFound, err := r.S3Client.BucketExists(pathResource.Spec.BucketName) - if err != nil { - logger.Error(err, "an error occurred while checking the existence of a bucket", "bucket", pathResource.Spec.BucketName) - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "BucketExistenceCheckFailed", - fmt.Sprintf("Checking existence of bucket [%s] from S3 instance has failed", pathResource.Spec.BucketName), err) - } - - // If bucket does not exist, the Path CR should be in a failing state - if !bucketFound { - errorBucketNotFound := fmt.Errorf("the path CR %s references a non-existing bucket : %s", pathResource.Name, pathResource.Spec.BucketName) - logger.Error(errorBucketNotFound, errorBucketNotFound.Error()) - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "ReferencingNonExistingBucket", - fmt.Sprintf("The Path CR [%s] references a non-existing bucket [%s]", pathResource.Name, pathResource.Spec.BucketName), errorBucketNotFound) - } - - // If the bucket exists, proceed to create or recreate the referenced paths - // For every path on the custom resource's spec, we check the path actually - // exists on the bucket on the S3 server, and create it if it doesn't - // TODO ? : the way this is naively implemented, it's probably costly. Maybe - // we can get the "effectiveBucket" (with its quota and paths) once at the beginning, - // and iterate on this instead of interrogating the S3 server twice for every path. - // But then again, some buckets will likely be filled with many objects outside the - // scope of the CR, so getting all of them might be even more costly. 
- for _, pathInCr := range pathResource.Spec.Paths { - pathExists, err := r.S3Client.PathExists(pathResource.Spec.BucketName, pathInCr) - if err != nil { - logger.Error(err, "an error occurred while checking a path's existence on a bucket", "bucket", pathResource.Spec.BucketName, "path", pathInCr) - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathCheckFailed", - fmt.Sprintf("The check for path [%s] on bucket [%s] has failed", pathInCr, pathResource.Spec.BucketName), err) - } - - if !pathExists { - err = r.S3Client.CreatePath(pathResource.Spec.BucketName, pathInCr) - if err != nil { - logger.Error(err, "an error occurred while creating a path on a bucket", "bucket", pathResource.Spec.BucketName, "path", pathInCr) - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathCreationFailed", - fmt.Sprintf("The creation of path [%s] on bucket [%s] has failed", pathInCr, pathResource.Spec.BucketName), err) - } - } - } - - // The bucket reconciliation with its CR was succesful (or NOOP) - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorSucceeded", metav1.ConditionTrue, "PathsCreated", - fmt.Sprintf("The paths were created according to the specs of the [%s] CR", pathResource.Name), nil) - -} - -// SetupWithManager sets up the controller with the Manager. -func (r *PathReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&s3v1alpha1.Path{}). - // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ - WithEventFilter(predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - // Only reconcile if generation has changed - return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Evaluates to false if the object has been confirmed deleted. - return !e.DeleteStateUnknown - }, - }). - WithOptions(controller.Options{MaxConcurrentReconciles: 10}). - Complete(r) -} - -func (r *PathReconciler) finalizePath(pathResource *s3v1alpha1.Path) error { - logger := log.Log.WithValues("controller", "path") - if r.PathDeletion { - var failedPaths []string = make([]string, 0) - for _, path := range pathResource.Spec.Paths { - - pathExists, err := r.S3Client.PathExists(pathResource.Spec.BucketName, path) - if err != nil { - logger.Error(err, "finalize : an error occurred while checking a path's existence on a bucket", "bucket", pathResource.Spec.BucketName, "path", path) - } - - if pathExists { - err = r.S3Client.DeletePath(pathResource.Spec.BucketName, path) - if err != nil { - failedPaths = append(failedPaths, path) - } - } - } - - if len(failedPaths) > 0 { - return fmt.Errorf("at least one path couldn't be removed from S3 backend %+q", failedPaths) - } - } - return nil -} - -func (r *PathReconciler) SetPathStatusConditionAndUpdate(ctx context.Context, pathResource *s3v1alpha1.Path, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // We moved away from meta.SetStatusCondition, as the implementation did not allow for updating - // lastTransitionTime if a Condition (as identified by Reason instead of Type) was previously - // obtained and updated to again. 
- pathResource.Status.Conditions = utils.UpdateConditions(pathResource.Status.Conditions, metav1.Condition{ - Type: conditionType, - Status: status, - Reason: reason, - LastTransitionTime: metav1.NewTime(time.Now()), - Message: message, - ObservedGeneration: pathResource.GetGeneration(), - }) - - err := r.Status().Update(ctx, pathResource) - if err != nil { - logger.Error(err, "an error occurred while updating the status of the path resource") - return ctrl.Result{}, utilerrors.NewAggregate([]error{err, srcError}) - } - return ctrl.Result{}, srcError -} diff --git a/controllers/policy_controller.go b/controllers/policy_controller.go deleted file mode 100644 index 1dd597a..0000000 --- a/controllers/policy_controller.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "time" - - "github.com/minio/madmin-go/v3" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - "github.com/InseeFrLab/s3-operator/controllers/utils" -) - -// PolicyReconciler reconciles a Policy object -type PolicyReconciler struct { - client.Client - Scheme *runtime.Scheme - S3Client factory.S3Client - PolicyDeletion bool -} - -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies/finalizers,verbs=update - -const policyFinalizer = "s3.onyxia.sh/finalizer" - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *PolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // Checking for policy resource existence - policyResource := &s3v1alpha1.Policy{} - err := r.Get(ctx, req.NamespacedName, policyResource) - if err != nil { - if errors.IsNotFound(err) { - logger.Info("The Policy custom resource has been removed ; as such the Policy controller is NOOP.", "req.Name", req.Name) - return ctrl.Result{}, nil - } - logger.Error(err, "An error occurred when attempting to read the Policy resource from the Kubernetes cluster") - return ctrl.Result{}, err - } - - // Managing policy deletion with a finalizer - // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources - isMarkedForDeletion := policyResource.GetDeletionTimestamp() != nil - if isMarkedForDeletion { - if controllerutil.ContainsFinalizer(policyResource, policyFinalizer) { - // Run finalization logic for policyFinalizer. If the - // finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. - if err := r.finalizePolicy(policyResource); err != nil { - // return ctrl.Result{}, err - logger.Error(err, "an error occurred when attempting to finalize the policy", "policy", policyResource.Spec.Name) - // return ctrl.Result{}, err - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyFinalizeFailed", - fmt.Sprintf("An error occurred when attempting to delete policy [%s]", policyResource.Spec.Name), err) - } - - // Remove policyFinalizer. Once all finalizers have been - // removed, the object will be deleted. - controllerutil.RemoveFinalizer(policyResource, policyFinalizer) - err := r.Update(ctx, policyResource) - if err != nil { - logger.Error(err, "an error occurred when removing finalizer from policy", "policy", policyResource.Spec.Name) - // return ctrl.Result{}, err - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyFinalizerRemovalFailed", - fmt.Sprintf("An error occurred when attempting to remove the finalizer from policy [%s]", policyResource.Spec.Name), err) - } - } - return ctrl.Result{}, nil - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(policyResource, policyFinalizer) { - controllerutil.AddFinalizer(policyResource, policyFinalizer) - err = r.Update(ctx, policyResource) - if err != nil { - logger.Error(err, "an error occurred when adding finalizer from policy", "policy", policyResource.Spec.Name) - // return ctrl.Result{}, err - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyFinalizerAddFailed", - fmt.Sprintf("An error occurred when attempting to add the finalizer from policy [%s]", policyResource.Spec.Name), err) - } - } - - // Policy lifecycle management (other than deletion) starts here - - // Check policy existence on the S3 server - effectivePolicy, err := r.S3Client.GetPolicyInfo(policyResource.Spec.Name) - - // If the policy does not exist on S3... 
- if err != nil { - logger.Error(err, "an error occurred while checking the existence of a policy", "policy", policyResource.Spec.Name) - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyInfoFailed", - fmt.Sprintf("Obtaining policy[%s] info from S3 instance has failed", policyResource.Spec.Name), err) - } - - if effectivePolicy == nil { - - // Policy creation using info from the CR - err = r.S3Client.CreateOrUpdatePolicy(policyResource.Spec.Name, policyResource.Spec.PolicyContent) - if err != nil { - logger.Error(err, "an error occurred while creating the policy", "policy", policyResource.Spec.Name) - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyCreationFailed", - fmt.Sprintf("The creation of policy [%s] has failed", policyResource.Spec.Name), err) - } - - // Update status to reflect policy creation - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorSucceeded", metav1.ConditionTrue, "PolicyCreated", - fmt.Sprintf("The creation of policy [%s] has succeeded", policyResource.Spec.Name), nil) - - } - - // If the policy exists on S3, we compare its state to the custom resource that spawned it on K8S - matching, err := IsPolicyMatchingWithCustomResource(policyResource, effectivePolicy) - if err != nil { - logger.Error(err, "an error occurred while comparing actual and expected configuration for the policy", "policy", policyResource.Spec.Name) - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyComparisonFailed", - fmt.Sprintf("The comparison between the effective policy [%s] on S3 and its corresponding custom resource on K8S has failed", policyResource.Spec.Name), err) - } - // If the two match, no reconciliation is needed, but we still need to update - // the status, in case the generation changed (eg : rollback to previous state after a problematic change) - if matching { - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorSucceeded", metav1.ConditionTrue, "PolicyUnchanged", - fmt.Sprintf("The policy [%s] matches its corresponding custom resource", policyResource.Spec.Name), nil) - } - - // If not we update the policy to match the CR - err = r.S3Client.CreateOrUpdatePolicy(policyResource.Spec.Name, policyResource.Spec.PolicyContent) - if err != nil { - logger.Error(err, "an error occurred while updating the policy", "policy", policyResource.Spec.Name) - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyUpdateFailed", - fmt.Sprintf("The update of effective policy [%s] on S3 to match its corresponding custom resource on K8S has failed", policyResource.Spec.Name), err) - } - - // Update status to reflect policy update - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorSucceeded", metav1.ConditionTrue, "PolicyUpdated", - fmt.Sprintf("The policy [%s] was updated according to its matching custom resource", policyResource.Spec.Name), nil) -} - -// SetupWithManager sets up the controller with the Manager. -func (r *PolicyReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&s3v1alpha1.Policy{}). 
- // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ - WithEventFilter(predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - // Only reconcile if generation has changed - return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Evaluates to false if the object has been confirmed deleted. - return !e.DeleteStateUnknown - }, - }). - WithOptions(controller.Options{MaxConcurrentReconciles: 10}). - Complete(r) -} - -func IsPolicyMatchingWithCustomResource(policyResource *s3v1alpha1.Policy, effectivePolicy *madmin.PolicyInfo) (bool, error) { - // The policy content visible in the custom resource usually contains indentations and newlines - // while the one we get from S3 is compacted. In order to compare them, we compact the former. - policyResourceAsByteSlice := []byte(policyResource.Spec.PolicyContent) - buffer := new(bytes.Buffer) - err := json.Compact(buffer, policyResourceAsByteSlice) - if err != nil { - return false, err - } - - // Another gotcha is that the effective policy comes up as a json.RawContent, - // which needs marshalling in order to be properly compared to the []byte we get from the CR. - marshalled, err := json.Marshal(effectivePolicy.Policy) - if err != nil { - return false, err - } - return bytes.Equal(buffer.Bytes(), marshalled), nil -} - -func (r *PolicyReconciler) finalizePolicy(policyResource *s3v1alpha1.Policy) error { - if r.PolicyDeletion { - return r.S3Client.DeletePolicy(policyResource.Spec.Name) - } - return nil -} - -func (r *PolicyReconciler) SetPolicyStatusConditionAndUpdate(ctx context.Context, policyResource *s3v1alpha1.Policy, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // We moved away from meta.SetStatusCondition, as the implementation did not allow for updating - // lastTransitionTime if a Condition (as identified by Reason instead of Type) was previously - // obtained and updated to again. - policyResource.Status.Conditions = utils.UpdateConditions(policyResource.Status.Conditions, metav1.Condition{ - Type: conditionType, - Status: status, - Reason: reason, - LastTransitionTime: metav1.NewTime(time.Now()), - Message: message, - ObservedGeneration: policyResource.GetGeneration(), - }) - - err := r.Status().Update(ctx, policyResource) - if err != nil { - logger.Error(err, "an error occurred while updating the status of the policy resource") - return ctrl.Result{}, utilerrors.NewAggregate([]error{err, srcError}) - } - return ctrl.Result{}, srcError -} diff --git a/controllers/user_controller.go b/controllers/user_controller.go deleted file mode 100644 index 93dd001..0000000 --- a/controllers/user_controller.go +++ /dev/null @@ -1,559 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "cmp" - "context" - "fmt" - "slices" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - utils "github.com/InseeFrLab/s3-operator/controllers/utils" - password "github.com/InseeFrLab/s3-operator/controllers/utils/password" -) - -// S3UserReconciler reconciles a S3User object -type S3UserReconciler struct { - client.Client - Scheme *runtime.Scheme - S3Client factory.S3Client - S3UserDeletion bool - OverrideExistingSecret bool -} - -const ( - userFinalizer = "s3.onyxia.sh/userFinalizer" -) - -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=S3User,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=S3User/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=S3User/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *S3UserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // Checking for userResource existence - userResource := &s3v1alpha1.S3User{} - err := r.Get(ctx, req.NamespacedName, userResource) - if err != nil { - if errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("The S3User CR %s (or its owned Secret) has been removed. NOOP", req.Name)) - return ctrl.Result{}, nil - } - logger.Error(err, "An error occurred when fetching the S3User from Kubernetes") - return ctrl.Result{}, err - } - - // Check if the userResource instance is marked to be deleted, which is - // indicated by the deletion timestamp being set. The object will be deleted. 
- if userResource.GetDeletionTimestamp() != nil { - logger.Info("userResource have been marked for deletion") - return r.handleS3UserDeletion(ctx, userResource) - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(userResource, userFinalizer) { - logger.Info("adding finalizer to user") - - controllerutil.AddFinalizer(userResource, userFinalizer) - err = r.Update(ctx, userResource) - if err != nil { - logger.Error(err, "an error occurred when adding finalizer from user", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserFinalizerAddFailed", - fmt.Sprintf("An error occurred when attempting to add the finalizer from user %s", userResource.Name), err) - } - } - - // Check user existence on the S3 server - found, err := r.S3Client.UserExist(userResource.Spec.AccessKey) - if err != nil { - logger.Error(err, "an error occurred while checking the existence of a user", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserExistenceCheckFailed", - fmt.Sprintf("The check for user %s's existence on the S3 backend has failed", userResource.Name), err) - } - - // If the user does not exist, it is created based on the CR - if !found { - logger.Info("this user doesn't exist on the S3 backend and will be created", "accessKey", userResource.Spec.AccessKey) - return r.handleS3NewUser(ctx, userResource) - } - logger.Info("this user already exists on the S3 backend and will be reconciled", "accessKey", userResource.Spec.AccessKey) - return r.handleS3ExistingUser(ctx, userResource) - -} - -func (r *S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResource *s3v1alpha1.S3User) (reconcile.Result, error) { - logger := log.FromContext(ctx) - - // --- Begin Secret management section - - userOwnedSecret, err := r.getUserSecret(ctx, userResource) - if err != nil { - if err.Error() == "SecretListingFailed" { - logger.Error(err, "An error occurred when trying to obtain the user's secret. The user will be deleted from S3 backend and recreated with a secret.") - - r.deleteSecret(ctx, &userOwnedSecret) - err = r.S3Client.DeleteUser(userResource.Spec.AccessKey) - if err != nil { - logger.Error(err, "Could not delete user on S3 server", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserDeletionFailed", - fmt.Sprintf("Deletion of S3user %s on S3 server has failed", userResource.Name), err) - } - return r.handleS3NewUser(ctx, userResource) - } else if err.Error() == "S3UserSecretNameMismatch" { - logger.Info("A secret with owner reference to the user was found, but its name doesn't match the spec. This is probably due to the S3User's spec changing (specifically spec.secretName being added, changed or removed). 
The \"old\" secret will be deleted.") - r.deleteSecret(ctx, &userOwnedSecret) - } - } - - if userOwnedSecret.Name == "" { - logger.Info("Secret associated to user not found, user will be deleted from the S3 backend, then recreated with a secret") - err = r.S3Client.DeleteUser(userResource.Spec.AccessKey) - if err != nil { - logger.Error(err, "Could not delete user on S3 server", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserDeletionFailed", - fmt.Sprintf("Deletion of S3User %s on S3 server has failed", userResource.Name), err) - } - return r.handleS3NewUser(ctx, userResource) - } - - // --- End Secret management section - - logger.Info("Checking user policies") - userPolicies, err := r.S3Client.GetUserPolicies(userResource.Spec.AccessKey) - if err != nil { - logger.Error(err, "Could not check the user's policies") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyCheckFailed", - fmt.Sprintf("Checking the S3user %s's policies has failed", userResource.Name), err) - } - - policyToDelete := []string{} - policyToAdd := []string{} - for _, policy := range userPolicies { - policyFound := slices.Contains(userResource.Spec.Policies, policy) - if !policyFound { - logger.Info(fmt.Sprintf("S3User policy definition doesn't contain policy %s", policy)) - policyToDelete = append(policyToDelete, policy) - } - } - - for _, policy := range userResource.Spec.Policies { - policyFound := slices.Contains(userPolicies, policy) - if !policyFound { - logger.Info(fmt.Sprintf("S3User policy definition must contain policy %s", policy)) - policyToAdd = append(policyToAdd, policy) - } - } - - if len(policyToDelete) > 0 { - err = r.S3Client.RemovePoliciesFromUser(userResource.Spec.AccessKey, policyToDelete) - if err != nil { - logger.Error(err, "an error occurred while removing policy to user", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyAppendFailed", - fmt.Sprintf("Error while updating policies of user %s on S3 backend has failed", userResource.Name), err) - } - } - - if len(policyToAdd) > 0 { - err := r.S3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policyToAdd) - if err != nil { - logger.Error(err, "an error occurred while adding policy to user", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyAppendFailed", - fmt.Sprintf("Error while updating policies of user %s on S3 backend has failed", userResource.Name), err) - } - } - - // If a matching secret is found, then we check if it is still valid, as in : do the credentials it - // contains still allow authenticating the S3User on the backend ? If not, the user is deleted and recreated. 
- // credentialsValid, err := r.S3Client.CheckUserCredentialsValid(userResource.Name, userResource.Spec.AccessKey, string(userOwnedSecret.Data["secretKey"])) - credentialsValid, err := r.S3Client.CheckUserCredentialsValid(userResource.Name, string(userOwnedSecret.Data["accessKey"]), string(userOwnedSecret.Data["secretKey"])) - if err != nil { - logger.Error(err, "An error occurred when checking if user credentials were valid", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserCredentialsCheckFailed", - fmt.Sprintf("Checking the S3User %s's credentials on S3 server has failed", userResource.Name), err) - } - - if !credentialsValid { - logger.Info("The secret containing the credentials will be deleted, and the user will be deleted from the S3 backend, then recreated (through another reconcile)") - r.deleteSecret(ctx, &userOwnedSecret) - err = r.S3Client.DeleteUser(userResource.Spec.AccessKey) - if err != nil { - logger.Error(err, "Could not delete user on S3 server", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserDeletionFailed", - fmt.Sprintf("Deletion of S3user %s on S3 server has failed", userResource.Name), err) - } - - return r.handleS3NewUser(ctx, userResource) - - } - - logger.Info("User was reconciled without error") - - // Re-fetch the S3User to ensure we have the latest state after updating the secret - // This is necessary at least when creating a user with secretName targetting a pre-existing secret - // that has proper form (data.accessKey and data.secretKey) but isn't owned by any other s3user - if err := r.Get(ctx, types.NamespacedName{Name: userResource.Name, Namespace: userResource.Namespace}, userResource); err != nil { - logger.Error(err, "Failed to re-fetch S3User") - return ctrl.Result{}, err - } - - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorSucceeded", metav1.ConditionTrue, "S3UserUpdated", - fmt.Sprintf("The user %s was updated according to its matching custom resource", userResource.Name), nil) -} - -func (r *S3UserReconciler) handleS3NewUser(ctx context.Context, userResource *s3v1alpha1.S3User) (reconcile.Result, error) { - logger := log.FromContext(ctx) - - // Generating a random secret key - secretKey, err := password.Generate(20, true, false, true) - if err != nil { - logger.Error(err, fmt.Sprintf("Fail to generate password for user %s", userResource.Name)) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserGeneratePasswordFailed", - fmt.Sprintf("An error occurred when attempting to generate password for user %s", userResource.Name), err) - } - - // Create a new K8S Secret to hold the user's accessKey and secretKey - secret, err := r.newSecretForCR(ctx, userResource, map[string][]byte{userResource.Spec.SecretFieldNameAccessKey: []byte(userResource.Spec.AccessKey), userResource.Spec.SecretFieldNameSecretKey: []byte(secretKey)}) - if err != nil { - // Error while creating the Kubernetes secret - requeue the request. 
- logger.Error(err, "Could not generate Kubernetes secret") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "SecretGenerationFailed", - fmt.Sprintf("The generation of the k8s Secret %s has failed", userResource.Name), err) - } - - // For managing user creation, we first check if a Secret matching - // the user's spec (not matching the owner reference) exists - existingK8sSecret := &corev1.Secret{} - err = r.Get(ctx, types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}, existingK8sSecret) - - // If none exist : we create the user, then the secret - if err != nil && errors.IsNotFound(err) { - logger.Info("No secret found ; creating a new Secret", "Secret.Namespace", secret.Namespace, "Secret.Name", secret.Name) - - // Creating the user - err = r.S3Client.CreateUser(userResource.Spec.AccessKey, secretKey) - - if err != nil { - logger.Error(err, "an error occurred while creating user on S3 server", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserCreationFailed", - fmt.Sprintf("Creation of user %s on S3 instance has failed", userResource.Name), err) - } - - // Creating the secret - logger.Info("Creating a new secret to store the user's credentials", "secretName", secret.Name) - err = r.Create(ctx, secret) - if err != nil { - logger.Error(err, "Could not create secret") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserSecretCreationFailed", - fmt.Sprintf("The creation of the k8s Secret %s has failed", secret.Name), err) - } - - // Add policies - err = r.addPoliciesToUser(ctx, userResource) - if err != nil { - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyAppendFailed", - fmt.Sprintf("Error while updating policies of user %s on S3 instance has failed", userResource.Name), err) - } - - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorSucceeded", metav1.ConditionTrue, "S3UserCreatedWithNewSecret", - fmt.Sprintf("The S3User %s and the Secret %s were created successfully", userResource.Name, secret.Name), nil) - - } else if err != nil { - logger.Error(err, "Couldn't check secret existence") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "SecretExistenceCheckFailedDuringS3UserCreation", - fmt.Sprintf("The check for an existing secret %s during the creation of the S3User %s has failed", secret.Name, userResource.Name), err) - - } else { - // If a secret already exists, but has a different S3User owner reference, then the creation should - // fail with no requeue, and use the status to inform that the spec should be changed - for _, ref := range existingK8sSecret.OwnerReferences { - if ref.Kind == "S3User" { - if ref.UID != userResource.UID { - logger.Error(fmt.Errorf(""), "The secret matching the new S3User's spec is owned by a different S3User.", "conflictingUser", ref.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserCreationFailedAsSecretIsOwnedByAnotherS3User", - fmt.Sprintf("The secret matching the new S3User's spec is owned by a different, pre-existing S3User (%s). 
The S3User being created now (%s) won't be created on the S3 backend until its spec changes to target a different secret", ref.Name, userResource.Name), nil) - } - } - } - - if r.OverrideExistingSecret { - // Case 3.2 : they are not valid, but the operator is configured to overwrite it - logger.Info(fmt.Sprintf("A secret with the name %s already exists ; it will be overwritten as per operator configuration", secret.Name)) - - // Creating the user - err = r.S3Client.CreateUser(userResource.Spec.AccessKey, secretKey) - - if err != nil { - logger.Error(err, "an error occurred while creating user on S3 server", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserCreationFailed", - fmt.Sprintf("Creation of user %s on S3 instance has failed", userResource.Name), err) - } - - // Updating the secret - logger.Info("Updating the pre-existing secret with new credentials", "secretName", secret.Name) - err = r.Update(ctx, secret) - if err != nil { - logger.Error(err, "Could not update secret") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "SecretUpdateFailed", - fmt.Sprintf("The update of the k8s Secret %s has failed", secret.Name), err) - } - - // Add policies - err = r.addPoliciesToUser(ctx, userResource) - if err != nil { - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyAppendFailed", - fmt.Sprintf("Error while updating policies of user %s on S3 instance has failed", userResource.Name), err) - } - - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorSucceeded", metav1.ConditionTrue, "S3UserCreatedWithOverriddenSecret", - fmt.Sprintf("The S3User %s was created and the Secret %s was updated successfully", userResource.Name, secret.Name), nil) - } - - // Case 3.3 : they are not valid, and the operator is configured to keep the existing secret - // The user will not be created, with no requeue and with two possible ways out : either toggle - // OverrideExistingSecret on, or delete the S3User whose credentials are not working anyway. - logger.Error(nil, fmt.Sprintf("A secret with the name %s already exists ; as the operator is configured to NOT override any pre-existing secrets, this user (%s) will not be created on the S3 backend until its spec changes (to target a new secret), or until the operator configuration is changed to override existing secrets", secret.Name, userResource.Name)) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorSucceeded", metav1.ConditionTrue, "S3UserCreationFailedAsSecretCannotBeOverwritten", - fmt.Sprintf("The S3User %s wasn't created, as its spec targets a secret (%s) containing invalid credentials.
The user's spec should be changed to target a different secret.", userResource.Name, secret.Name), nil) - - } -} - -func (r *S3UserReconciler) addPoliciesToUser(ctx context.Context, userResource *s3v1alpha1.S3User) error { - logger := log.FromContext(ctx) - policies := userResource.Spec.Policies - if policies != nil { - err := r.S3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policies) - if err != nil { - logger.Error(err, "an error occurred while adding policy to user", "user", userResource.Name) - return err - } - } - return nil -} - -func (r *S3UserReconciler) handleS3UserDeletion(ctx context.Context, userResource *s3v1alpha1.S3User) (reconcile.Result, error) { - logger := log.FromContext(ctx) - - if controllerutil.ContainsFinalizer(userResource, userFinalizer) { - // Run finalization logic for S3UserFinalizer. If the finalization logic fails, don't remove the finalizer so that we can retry during the next reconciliation. - if err := r.finalizeS3User(userResource); err != nil { - logger.Error(err, "an error occurred when attempting to finalize the user", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserFinalizeFailed", - fmt.Sprintf("An error occurred when attempting to delete user %s", userResource.Name), err) - } - - //Remove userFinalizer. Once all finalizers have been removed, the object will be deleted. - controllerutil.RemoveFinalizer(userResource, userFinalizer) - // Unsure why the behavior is different to that of bucket/policy/path controllers, but it appears - // calling r.Update() for adding/removal of finalizer is not necessary (an update event is generated - // with the call to AddFinalizer/RemoveFinalizer), and worse, causes "freshness" problem (with the - // "the object has been modified; please apply your changes to the latest version and try again" error) - err := r.Update(ctx, userResource) - if err != nil { - logger.Error(err, "Failed to remove finalizer.") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserFinalizerRemovalFailed", - fmt.Sprintf("An error occurred when attempting to remove the finalizer from user %s", userResource.Name), err) - } - } - return ctrl.Result{}, nil -} - -func (r *S3UserReconciler) getUserSecret(ctx context.Context, userResource *s3v1alpha1.S3User) (corev1.Secret, error) { - logger := log.FromContext(ctx) - - // Listing every secrets in the S3User's namespace, as a first step - // to get the actual secret matching the S3User proper. - // TODO : proper label matching ? - secretsList := &corev1.SecretList{} - userSecret := corev1.Secret{} - - err := r.List(ctx, secretsList, client.InNamespace(userResource.Namespace)) - if err != nil { - logger.Error(err, "An error occurred while listing the secrets in user's namespace") - return userSecret, fmt.Errorf("SecretListingFailed") - } - - if len(secretsList.Items) == 0 { - logger.Info("The user's namespace doesn't appear to contain any secret") - return userSecret, nil - } - // In all the secrets inside the S3User's namespace, one should have an owner reference - // pointing to the S3User. For that specific secret, we check if its name matches the one from - // the S3User, whether explicit (userResource.Spec.SecretName) or implicit (userResource.Name) - // In case of mismatch, that secret is deleted (and will be recreated) ; if there is a match, - // it will be used for state comparison. 
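Assuming the conventions visible in this file (effective secret name taken from cmp.Or(spec.secretName, metadata.name), owner reference set via ctrl.SetControllerReference), a Secret owned by an S3User would look roughly like the hypothetical, self-contained sketch below; all names and the UID are placeholders, not values from the patch. The loop that follows scans the listed Secrets for exactly this kind of reference:

    // Hypothetical sketch of the Secret shape that getUserSecret matches on.
    package main

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        owned := corev1.Secret{
            ObjectMeta: metav1.ObjectMeta{
                Name:      "my-s3user",    // cmp.Or(spec.secretName, metadata.name)
                Namespace: "my-namespace", // same namespace as the S3User
                OwnerReferences: []metav1.OwnerReference{{
                    APIVersion: "s3.onyxia.sh/v1alpha1",
                    Kind:       "S3User",
                    Name:       "my-s3user",
                    // Placeholder UID; this is what gets compared to userResource.GetUID().
                    UID: "11111111-2222-3333-4444-555555555555",
                }},
            },
        }
        fmt.Println(owned.Name, owned.OwnerReferences[0].Kind)
    }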
- uid := userResource.GetUID() - - // cmp.Or takes the first non "zero" value, see https://pkg.go.dev/cmp#Or - effectiveS3UserSecretName := cmp.Or(userResource.Spec.SecretName, userResource.Name) - for _, secret := range secretsList.Items { - for _, ref := range secret.OwnerReferences { - if ref.UID == uid { - if secret.Name != effectiveS3UserSecretName { - return secret, fmt.Errorf("S3UserSecretNameMismatch") - } else { - userSecret = secret - break - } - } - } - } - - return userSecret, nil -} - -func (r *S3UserReconciler) deleteSecret(ctx context.Context, secret *corev1.Secret) { - logger := log.FromContext(ctx) - err := r.Delete(ctx, secret) - if err != nil { - logger.Error(err, "an error occurred while deleting a secret") - } -} - -// SetupWithManager sets up the controller with the Manager.* -func (r *S3UserReconciler) SetupWithManager(mgr ctrl.Manager) error { - // filterLogger := ctrl.Log.WithName("filterEvt") - return ctrl.NewControllerManagedBy(mgr). - For(&s3v1alpha1.S3User{}). - // The "secret owning" implies the reconcile loop will be called whenever a Secret owned - // by a S3User is created/updated/deleted. In other words, even when creating a single S3User, - // there is going to be several iterations. - Owns(&corev1.Secret{}). - // See : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ - WithEventFilter(predicate.Funcs{ - - // Ignore updates to CR status in which case metadata.Generation does not change, - // unless it is a change to the underlying Secret - UpdateFunc: func(e event.UpdateEvent) bool { - - // To check if the update event is tied to a change on secret, - // we try to cast e.ObjectNew to a secret (only if it's not a S3User, which - // should prevent any TypeAssertionError based panic). - secretUpdate := false - newUser, _ := e.ObjectNew.(*s3v1alpha1.S3User) - if newUser == nil { - secretUpdate = (e.ObjectNew.(*corev1.Secret) != nil) - } - - return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() || secretUpdate - }, - // Ignore create events caused by the underlying secret's creation - CreateFunc: func(e event.CreateEvent) bool { - user, _ := e.Object.(*s3v1alpha1.S3User) - return user != nil - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Evaluates to false if the object has been confirmed deleted. - return !e.DeleteStateUnknown - }, - }). - WithOptions(controller.Options{MaxConcurrentReconciles: 10}). - Complete(r) -} - -func (r *S3UserReconciler) setS3UserStatusConditionAndUpdate(ctx context.Context, userResource *s3v1alpha1.S3User, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // We moved away from meta.SetStatusCondition, as the implementation did not allow for updating - // lastTransitionTime if a Condition (as identified by Reason instead of Type) was previously - // obtained and updated to again. 
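For context, a minimal, self-contained sketch (assuming a recent k8s.io/apimachinery) of the stock helper's behaviour: meta.SetStatusCondition keys conditions by Type and only refreshes lastTransitionTime when the Status value actually changes, whereas this operator wants a fresh timestamp whenever the same Reason is reported again, which is what the custom UpdateConditions helper below provides.

    // Sketch only: two updates sharing a Type collapse into a single entry,
    // and lastTransitionTime is left untouched because Status did not change.
    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/meta"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        conditions := []metav1.Condition{}
        failed := metav1.Condition{
            Type:   "OperatorFailed",
            Status: metav1.ConditionFalse,
            Reason: "S3UserCreationFailed",
        }
        meta.SetStatusCondition(&conditions, failed)
        meta.SetStatusCondition(&conditions, failed) // same Type and Status: timestamp kept as-is
        fmt.Println(len(conditions))                 // 1
    }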
- userResource.Status.Conditions = utils.UpdateConditions(userResource.Status.Conditions, metav1.Condition{ - Type: conditionType, - Status: status, - Reason: reason, - LastTransitionTime: metav1.NewTime(time.Now()), - Message: message, - ObservedGeneration: userResource.GetGeneration(), - }) - - err := r.Status().Update(ctx, userResource) - if err != nil { - logger.Error(err, "an error occurred while updating the status of the S3User resource") - return ctrl.Result{}, utilerrors.NewAggregate([]error{err, srcError}) - } - return ctrl.Result{}, srcError -} - -func (r *S3UserReconciler) finalizeS3User(userResource *s3v1alpha1.S3User) error { - if r.S3UserDeletion { - return r.S3Client.DeleteUser(userResource.Spec.AccessKey) - } - return nil -} - -// newSecretForCR returns a secret with the same name/namespace as the CR. -// The secret will include all labels and annotations from the CR. -func (r *S3UserReconciler) newSecretForCR(ctx context.Context, userResource *s3v1alpha1.S3User, data map[string][]byte) (*corev1.Secret, error) { - logger := log.FromContext(ctx) - - // Reusing the S3User's labels and annotations - labels := map[string]string{} - for k, v := range userResource.ObjectMeta.Labels { - labels[k] = v - } - - annotations := map[string]string{} - for k, v := range userResource.ObjectMeta.Annotations { - annotations[k] = v - } - - secretName := cmp.Or(userResource.Spec.SecretName, userResource.Name) - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: userResource.Namespace, - Labels: labels, - Annotations: annotations, - }, - Data: data, - Type: "Opaque", - } - - // Set S3User instance as the owner and controller - err := ctrl.SetControllerReference(userResource, secret, r.Scheme) - if err != nil { - logger.Error(err, "Could not set owner of kubernetes secret") - return nil, err - } - - return secret, nil - -} diff --git a/controllers/utils/utils.go b/controllers/utils/utils.go deleted file mode 100644 index 4f69b74..0000000 --- a/controllers/utils/utils.go +++ /dev/null @@ -1,29 +0,0 @@ -package utils - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// func UpdateConditions(existingConditions []metav1.Condition, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) []metav1.Condition { -func UpdateConditions(existingConditions []metav1.Condition, newCondition metav1.Condition) []metav1.Condition { - - // Comparing reason to existing conditions' reason. - // If a match is found, only the lastTransitionTime is updated - // If not, a new condition is added to the existing list - var hasMatch, matchingIndex = false, -1 - for i, condition := range existingConditions { - if condition.Reason == newCondition.Reason { - matchingIndex = i - hasMatch = true - } - } - if hasMatch { - existingConditions[matchingIndex].LastTransitionTime = metav1.NewTime(time.Now()) - existingConditions[matchingIndex].ObservedGeneration = newCondition.ObservedGeneration - return existingConditions - } - - return append([]metav1.Condition{newCondition}, existingConditions...) -} diff --git a/deploy/charts/s3-operator/.helmignore b/deploy/charts/s3-operator/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/deploy/charts/s3-operator/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/charts/s3-operator/Chart.yaml b/deploy/charts/s3-operator/Chart.yaml new file mode 100644 index 0000000..ff83136 --- /dev/null +++ b/deploy/charts/s3-operator/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: s3-operator +description: A Helm chart for deploying an operator to manage S3 resources (eg buckets, policies) +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.6.0 +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.0.0" diff --git a/deploy/charts/s3-operator/README.md b/deploy/charts/s3-operator/README.md new file mode 100644 index 0000000..6bd39e6 --- /dev/null +++ b/deploy/charts/s3-operator/README.md @@ -0,0 +1,32 @@ +# s3-operator + +![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.8.0](https://img.shields.io/badge/AppVersion-0.8.0-informational?style=flat-square) + +A Helm chart for deploying an operator to manage S3 resources (eg buckets, policies) + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| controllerManager.manager.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | +| controllerManager.manager.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | +| controllerManager.manager.image.repository | string | `"inseefrlab/s3-operator"` | | +| controllerManager.manager.image.tag | string | `"latest"` | | +| controllerManager.manager.imagePullPolicy | string | `"IfNotPresent"` | | +| controllerManager.manager.imagePullSecrets | list | `[]` | | +| controllerManager.manager.resources.limits.cpu | string | `"1000m"` | | +| controllerManager.manager.resources.limits.memory | string | `"512Mi"` | | +| controllerManager.manager.resources.requests.cpu | string | `"50m"` | | +| controllerManager.manager.resources.requests.memory | string | `"64Mi"` | | +| controllerManager.replicas | int | `1` | | +| crds.install | bool | `true` | Install and upgrade CRDs | +| crds.keep | bool | `true` | Keep CRDs on chart uninstall | +| kubernetesClusterDomain | string | `"cluster.local"` | | +| s3.deletion.bucket | bool | `false` | | +| s3.deletion.path | bool | `false` | | +| s3.deletion.policy | bool | `false` | | +| s3.endpointUrl | string | `"localhost:9000"` | | +| 
s3.existingSecret | string | `"my-s3-operator-auth-secret"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/deploy/charts/s3-operator/templates/_helpers.tpl b/deploy/charts/s3-operator/templates/_helpers.tpl new file mode 100644 index 0000000..c9ba877 --- /dev/null +++ b/deploy/charts/s3-operator/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "s3-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "s3-operator.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "s3-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "s3-operator.labels" -}} +helm.sh/chart: {{ include "s3-operator.chart" . }} +{{ include "s3-operator.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "s3-operator.selectorLabels" -}} +app.kubernetes.io/name: {{ include "s3-operator.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "s3-operator.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "s3-operator.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/deploy/charts/s3-operator/templates/crds/buckets.yaml b/deploy/charts/s3-operator/templates/crds/buckets.yaml new file mode 100644 index 0000000..7c510f9 --- /dev/null +++ b/deploy/charts/s3-operator/templates/crds/buckets.yaml @@ -0,0 +1,157 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.crds.keep }} + helm.sh/resource-policy: keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.11.1 + labels: + {{- include "s3-operator.labels" . | nindent 4 }} + name: buckets.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: Bucket + listKind: BucketList + plural: buckets + singular: bucket + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Bucket is the Schema for the buckets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BucketSpec defines the desired state of Bucket + properties: + name: + description: Name of the bucket + type: string + paths: + description: Paths (folders) to create inside the bucket + items: + type: string + type: array + quota: + description: Quota to apply to the bucket + properties: + default: + description: Default quota to apply, mandatory + format: int64 + type: integer + override: + description: Optional override quota, to be used by cluster admin. + format: int64 + type: integer + required: + - default + type: object + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the bucket + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf + required: + - name + - quota + - s3InstanceRef + type: object + status: + description: BucketStatus defines the observed state of Bucket + properties: + conditions: + description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. 
The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/crds/paths.yaml b/deploy/charts/s3-operator/templates/crds/paths.yaml new file mode 100644 index 0000000..f2c6219 --- /dev/null +++ b/deploy/charts/s3-operator/templates/crds/paths.yaml @@ -0,0 +1,141 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.crds.keep }} + helm.sh/resource-policy: keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.11.1 + labels: + {{- include "s3-operator.labels" . | nindent 4 }} + name: paths.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: Path + listKind: PathList + plural: paths + singular: path + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Path is the Schema for the paths API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PathSpec defines the desired state of Path + properties: + bucketName: + description: Name of the bucket + type: string + paths: + description: Paths (folders) to create inside the bucket + items: + type: string + type: array + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the Paths + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf + required: + - bucketName + type: object + status: + description: PathStatus defines the observed state of Path + properties: + conditions: + description: 'Status management using Conditions. 
See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/crds/policies.yaml b/deploy/charts/s3-operator/templates/crds/policies.yaml new file mode 100644 index 0000000..dec5a83 --- /dev/null +++ b/deploy/charts/s3-operator/templates/crds/policies.yaml @@ -0,0 +1,140 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.crds.keep }} + helm.sh/resource-policy: keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.11.1 + labels: + {{- include "s3-operator.labels" . | nindent 4 }} + name: policies.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: Policy + listKind: PolicyList + plural: policies + singular: policy + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Policy is the Schema for the policies API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PolicySpec defines the desired state of Policy + properties: + name: + description: Name of the policy + type: string + policyContent: + description: Content of the policy (IAM JSON format) + type: string + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the Policy + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf + required: + - name + - policyContent + type: object + status: + description: PolicyStatus defines the observed state of Policy + properties: + conditions: + description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/crds/s3instances.yaml b/deploy/charts/s3-operator/templates/crds/s3instances.yaml new file mode 100644 index 0000000..a2354bc --- /dev/null +++ b/deploy/charts/s3-operator/templates/crds/s3instances.yaml @@ -0,0 +1,180 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.crds.keep }} + helm.sh/resource-policy: keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.11.1 + labels: + {{- include "s3-operator.labels" . 
| nindent 4 }} + name: s3instances.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: S3Instance + listKind: S3InstanceList + plural: s3instances + singular: s3instance + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: S3Instance is the Schema for the S3Instances API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: S3InstanceSpec defines the desired state of S3Instance + properties: + allowedNamespaces: + description: AllowedNamespaces to use this S3InstanceUrl if empty + only the namespace of this instance url is allowed to use it + items: + type: string + type: array + bucketDeletionEnabled: + default: false + description: BucketDeletionEnabled Trigger bucket deletion on the + S3 backend upon CR deletion. Will fail if bucket is not empty. + type: boolean + caCertSecretRef: + description: Secret containing key ca.crt with the certificate associated + to the S3InstanceUrl + type: string + pathDeletionEnabled: + default: false + description: PathDeletionEnabled Trigger path deletion on the S3 backend + upon CR deletion. Limited to deleting the `.keep` files used by + the operator. + type: boolean + policyDeletionEnabled: + default: false + description: PolicyDeletionEnabled Trigger policy deletion on the + S3 backend upon CR deletion. + type: boolean + region: + description: region associated to the S3Instance + type: string + s3Provider: + default: minio + description: type of the S3Instance + enum: + - minio + - mockedS3Provider + type: string + x-kubernetes-validations: + - message: S3Provider is immutable + rule: self == oldSelf + s3UserDeletionEnabled: + default: false + description: S3UserDeletionEnabled Trigger S3 deletion on the S3 backend + upon CR deletion. + type: boolean + secretRef: + description: Ref to Secret associated to the S3Instance containing + accessKey and secretKey + type: string + url: + description: url of the S3Instance + type: string + required: + - bucketDeletionEnabled + - pathDeletionEnabled + - policyDeletionEnabled + - s3Provider + - s3UserDeletionEnabled + - secretRef + - url + type: object + status: + description: S3InstanceStatus defines the observed state of S3Instance + properties: + conditions: + description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/crds/s3users.yaml b/deploy/charts/s3-operator/templates/crds/s3users.yaml new file mode 100644 index 0000000..7893652 --- /dev/null +++ b/deploy/charts/s3-operator/templates/crds/s3users.yaml @@ -0,0 +1,144 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.crds.keep }} + helm.sh/resource-policy: keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.11.1 + labels: + {{- include "s3-operator.labels" . | nindent 4 }} + name: s3users.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: S3User + listKind: S3UserList + plural: s3users + singular: s3user + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: S3User is the Schema for the S3Users API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: S3UserSpec defines the desired state of S3User + properties: + accessKey: + description: Name of the S3User + type: string + policies: + description: Policies associated to the S3User + items: + type: string + type: array + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the user + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf + secretName: + description: SecretName associated to the S3User + type: string + required: + - accessKey + type: object + status: + description: S3UserStatus defines the observed state of S3User + properties: + conditions: + description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/default-s3instance.yaml b/deploy/charts/s3-operator/templates/default-s3instance.yaml new file mode 100644 index 0000000..26a10ca --- /dev/null +++ b/deploy/charts/s3-operator/templates/default-s3instance.yaml @@ -0,0 +1,69 @@ +{{- if .Values.s3.default.enabled -}} +apiVersion: s3.onyxia.sh/v1alpha1 +kind: S3Instance +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + control-plane: controller-manager + {{- include "s3-operator.labels" . | nindent 4 }} + name: default +spec: + s3Provider: {{ .Values.s3.default.s3Provider }} + url: {{ .Values.s3.default.url }} + {{- if .Values.s3.default.secretRef }} + secretRef: {{ .Values.s3.default.secretRef }} + {{- else }} + secretRef: default-s3instance-credentials + {{- end }} + {{- if .Values.s3.default.caCertSecretRef }} + caCertSecretRef: {{ .Values.s3.default.caCertSecretRef }} + {{- else }} + caCertSecretRef: default-s3instance-certificates + {{- end }} + {{- if .Values.s3.default.allowedNamespaces }} + allowedNamespaces: {{ .Values.s3.default.allowedNamespaces }} + {{- end }} + {{- if .Values.s3.default.region }} + region: {{ .Values.s3.default.region }} + {{- end }} + s3UserDeletionEnabled: {{ .Values.s3.default.deletion.s3user }} + pathDeletionEnabled: {{ .Values.s3.default.deletion.path }} + policyDeletionEnabled: {{ .Values.s3.default.deletion.policy }} + bucketDeletionEnabled: {{ .Values.s3.default.deletion.bucket }} + +{{- if not .Values.s3.default.secretRef }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + control-plane: controller-manager + {{- include "s3-operator.labels" . | nindent 4 }} + name: default-s3instance-credentials +type: Opaque +data: + S3_ACCESS_KEY: {{- .Values.s3.default.accessKey }} + S3_SECRET_KEY: {{- .Values.s3.default.secretKey }} +{{- end }} +{{- if not .Values.s3.default.caCertSecretRef }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + control-plane: controller-manager + {{- include "s3-operator.labels" . 
| nindent 4 }} + name: default-s3instance-certificates +type: Opaque +data: + ca.crt: {{ .Values.s3.default.caCertificatesBase64 }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/deployment.yaml b/deploy/charts/s3-operator/templates/deployment.yaml new file mode 100644 index 0000000..714c9f8 --- /dev/null +++ b/deploy/charts/s3-operator/templates/deployment.yaml @@ -0,0 +1,73 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "s3-operator.fullname" . }}-controller-manager + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + control-plane: controller-manager + {{- include "s3-operator.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.controllerManager.replicas }} + selector: + matchLabels: + control-plane: controller-manager + {{- include "s3-operator.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + control-plane: controller-manager + {{- include "s3-operator.selectorLabels" . | nindent 8 }} + annotations: + kubectl.kubernetes.io/default-container: manager + spec: + {{- with .Values.controllerManager.manager.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + - --override-existing-secret={{ .Values.kubernetes.overrideExistingSecret }} + {{- if .Values.controllerManager.manager.extraArgs }} + {{- toYaml .Values.controllerManager.manager.extraArgs | nindent 8 }} + {{- end }} + command: + - /manager + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: {{ quote .Values.kubernetes.clusterDomain }} + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- range $k, $v := .Values.controllerManager.manager.extraEnv }} + - name: {{ $k }} + value: {{ $v | quote }} + {{- end }} + image: {{ .Values.controllerManager.manager.image.repository }}:{{ .Values.controllerManager.manager.image.tag | default .Chart.AppVersion }} + imagePullPolicy: {{ .Values.controllerManager.manager.imagePullPolicy | default "IfNotPresent" | quote }} + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: {{- toYaml .Values.controllerManager.manager.resources | nindent 10 + }} + securityContext: {{- toYaml .Values.controllerManager.manager.containerSecurityContext + | nindent 10 }} + securityContext: + runAsNonRoot: true + serviceAccountName: {{ include "s3-operator.fullname" . }}-controller-manager + terminationGracePeriodSeconds: 10 diff --git a/deploy/charts/s3-operator/templates/leader-election-rbac.yaml b/deploy/charts/s3-operator/templates/leader-election-rbac.yaml new file mode 100644 index 0000000..de821b6 --- /dev/null +++ b/deploy/charts/s3-operator/templates/leader-election-rbac.yaml @@ -0,0 +1,59 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "s3-operator.fullname" . }}-leader-election-role + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + {{- include "s3-operator.labels" .
| nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "s3-operator.fullname" . }}-leader-election-rolebinding + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + {{- include "s3-operator.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: '{{ include "s3-operator.fullname" . }}-leader-election-role' +subjects: +- kind: ServiceAccount + name: '{{ include "s3-operator.fullname" . }}-controller-manager' + namespace: '{{ .Release.Namespace }}' \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/manager-rbac.yaml b/deploy/charts/s3-operator/templates/manager-rbac.yaml new file mode 100644 index 0000000..6349b24 --- /dev/null +++ b/deploy/charts/s3-operator/templates/manager-rbac.yaml @@ -0,0 +1,180 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "s3-operator.fullname" . }}-manager-role + labels: + {{- include "s3-operator.labels" . | nindent 4 }} +rules: +- apiGroups: + - "" + resources: + - secrets + verbs: + - create + - delete + - get + - list + - update + - watch +- apiGroups: + - "" + resources: + - secrets/finalizers + verbs: + - update +- apiGroups: + - "" + resources: + - secrets/status + verbs: + - get + - patch + - update +- apiGroups: + - s3.onyxia.sh + resources: + - buckets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - buckets/finalizers + verbs: + - update +- apiGroups: + - s3.onyxia.sh + resources: + - buckets/status + verbs: + - get + - patch + - update +- apiGroups: + - s3.onyxia.sh + resources: + - paths + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - paths/finalizers + verbs: + - update +- apiGroups: + - s3.onyxia.sh + resources: + - paths/status + verbs: + - get + - patch + - update +- apiGroups: + - s3.onyxia.sh + resources: + - policies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - policies/finalizers + verbs: + - update +- apiGroups: + - s3.onyxia.sh + resources: + - policies/status + verbs: + - get + - patch + - update +- apiGroups: + - s3.onyxia.sh + resources: + - s3instances + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - s3instances/finalizers + verbs: + - update +- apiGroups: + - s3.onyxia.sh + resources: + - s3instances/status + verbs: + - get + - patch + - update +- apiGroups: + - s3.onyxia.sh + resources: + - s3users + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - s3users/finalizers + verbs: + - update +- apiGroups: + - s3.onyxia.sh + resources: + - s3users/status + verbs: + - get + - patch + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "s3-operator.fullname" .
}}-manager-rolebinding + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + {{- include "s3-operator.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: '{{ include "s3-operator.fullname" . }}-manager-role' +subjects: +- kind: ServiceAccount + name: '{{ include "s3-operator.fullname" . }}-controller-manager' + namespace: '{{ .Release.Namespace }}' \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/serviceaccount.yaml b/deploy/charts/s3-operator/templates/serviceaccount.yaml new file mode 100644 index 0000000..78f981e --- /dev/null +++ b/deploy/charts/s3-operator/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "s3-operator.fullname" . }}-controller-manager + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + {{- include "s3-operator.labels" . | nindent 4 }} diff --git a/deploy/charts/s3-operator/values.yaml b/deploy/charts/s3-operator/values.yaml new file mode 100644 index 0000000..c8109cd --- /dev/null +++ b/deploy/charts/s3-operator/values.yaml @@ -0,0 +1,51 @@ +crds: + # -- Install and upgrade CRDs + install: true + # -- Keep CRDs on chart uninstall + keep: true + +controllerManager: + manager: + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + image: + repository: inseefrlab/s3-operator + tag: latest + imagePullPolicy: IfNotPresent + imagePullSecrets: [] + resources: + limits: + cpu: 1000m + memory: 512Mi + requests: + cpu: 50m + memory: 64Mi + extraArgs: [] + extraEnv: {} + replicas: 1 + +kubernetes: + clusterDomain: cluster.local + overrideExistingSecret: false + +s3: + default: + enabled: false + s3Provider: minio + url: "https://localhost:9000" + accessKey: "accessKey" + secretKey: "secretKey" + caCertificatesBase64: base64encodedPEMFormatCACertificate + region: us-east-1 + # secretRef: "my-s3-operator-auth-secret" + # caCertSecretRef: "my-s3-operator-cert-secret" + # allowedNamespaces: "" + # Should the operator try to delete the resource from the S3 backend upon CR deletion ? 
+ deletion: + bucket: false + path: false + policy: false + s3user: false diff --git a/docs/.gitkeep b/docs/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/go.mod b/go.mod index e781b36..70381ce 100644 --- a/go.mod +++ b/go.mod @@ -1,98 +1,103 @@ module github.com/InseeFrLab/s3-operator -go 1.22 +go 1.23.0 require ( - github.com/minio/madmin-go/v3 v3.0.34 - github.com/minio/minio-go/v7 v7.0.64 - github.com/onsi/ginkgo/v2 v2.11.0 - github.com/onsi/gomega v1.27.10 - go.uber.org/zap v1.25.0 - k8s.io/api v0.28.3 - k8s.io/apimachinery v0.28.3 - k8s.io/client-go v0.28.3 - sigs.k8s.io/controller-runtime v0.16.3 + github.com/minio/madmin-go/v3 v3.0.90 + github.com/minio/minio-go/v7 v7.0.84 + github.com/onsi/ginkgo/v2 v2.22.2 + github.com/onsi/gomega v1.36.2 + github.com/stretchr/testify v1.9.0 + go.uber.org/zap v1.27.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/client-go v0.32.0 + sigs.k8s.io/controller-runtime v0.20.1 +) + +require ( + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/goccy/go-json v0.10.4 // indirect + github.com/prometheus/prometheus v0.54.1 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/x448/float16 v0.8.4 // indirect ) require ( github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect - github.com/go-logr/zapr v1.2.4 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/imdario/mergo v0.3.6 // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/uuid v1.6.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress 
v1.16.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect - github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/philhofer/fwd v1.1.2 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect - github.com/prometheus/prom2json v1.3.3 // indirect - github.com/rs/xid v1.5.0 // indirect - github.com/safchain/ethtool v0.3.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/prometheus/client_golang v1.20.4 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.60.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/prom2json v1.4.1 // indirect + github.com/rs/xid v1.6.0 // indirect + github.com/safchain/ethtool v0.4.1 // indirect github.com/secure-io/sio-go v0.3.1 // indirect - github.com/shirou/gopsutil/v3 v3.23.1 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect + github.com/shirou/gopsutil/v3 v3.24.5 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tinylib/msgp v1.1.8 // indirect - github.com/tklauser/go-sysconf v0.3.11 // indirect - github.com/tklauser/numcpus v0.6.0 // indirect - github.com/yusufpapurcu/wmi v1.2.2 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/tinylib/msgp v1.2.5 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.9.3 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.7.0 // indirect + golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/protobuf v1.36.1 // 
indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.28.3 // indirect - k8s.io/component-base v0.28.3 // indirect - k8s.io/klog/v2 v2.100.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + k8s.io/apiextensions-apiserver v0.32.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 9790a39..c2f0df3 100644 --- a/go.sum +++ b/go.sum @@ -1,104 +1,92 @@ -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= 
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= 
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de h1:V53FWzU6KAZVi1tPp5UIsMoUWJ2/PNwYIDXnu7QuBCE= -github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0= +github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/minio/madmin-go/v3 v3.0.34 h1:MGPQYIWm52liSubofK24FhrznPYnRpQrDNddZJEyBPA= -github.com/minio/madmin-go/v3 v3.0.34/go.mod 
h1:4QN2NftLSV7MdlT50dkrenOMmNVHluxTvlqJou3hte8= +github.com/minio/madmin-go/v3 v3.0.90 h1:Lz6a6eT1h5QT54fkbsEJ0xcWuvBjE1IaNgxfkxe6Qxs= +github.com/minio/madmin-go/v3 v3.0.90/go.mod h1:pMLdj9OtN0CANNs5tdm6opvOlDFfj0WhbztboZAjRWE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.64 h1:Zdza8HwOzkld0ZG/og50w56fKi6AAyfqfifmasD9n2Q= -github.com/minio/minio-go/v7 v7.0.64/go.mod h1:R4WVUR6ZTedlCcGwZRauLMIKjgyaWxhs4Mqi/OMPmEc= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/minio/minio-go/v7 v7.0.84 h1:D1HVmAF8JF8Bpi6IU4V9vIEj+8pc+xU88EWMs2yed0E= +github.com/minio/minio-go/v7 v7.0.84/go.mod h1:57YXpvc5l3rjPdhqNrDsvVlY0qPI6UTk1bflAe+9doY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -106,211 +94,153 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= -github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/power-devops/perfstat 
v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= -github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/prometheus/prom2json v1.3.3 h1:IYfSMiZ7sSOfliBoo89PcufjWO4eAR0gznGcETyaUgo= -github.com/prometheus/prom2json v1.3.3/go.mod h1:Pv4yIPktEkK7btWsrUTWDDDrnpUrAELaOCj+oFwlgmc= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= -github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prom2json v1.4.1 h1:7McxdrHgPEOtMwWjkKtd0v5AhpR2Q6QAnlHKVxq0+tQ= +github.com/prometheus/prom2json v1.4.1/go.mod h1:CzOQykSKFxXuC7ELUZHOHQvwKesQ3eN0p2PWLhFitQM= +github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ= +github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/safchain/ethtool v0.4.1 h1:S6mEleTADqgynileXoiapt/nKnatyR6bmIHoF+h2ADo= +github.com/safchain/ethtool v0.4.1/go.mod h1:XLLnZmy4OCRTkksP/UiMjij96YmIsBfmBQcs7H6tA48= github.com/secure-io/sio-go v0.3.1 
h1:dNvY9awjabXTYGsTF1PiCySl9Ltofk9GA3VdWlo7rRc= github.com/secure-io/sio-go v0.3.1/go.mod h1:+xbkjDzPjwh4Axd07pRKSNriS9SCiYksWnZqdnfpQxs= -github.com/shirou/gopsutil/v3 v3.23.1 h1:a9KKO+kGLKEvcPIs4W62v0nu3sciVDOOOPUD0Hz7z/4= -github.com/shirou/gopsutil/v3 v3.23.1/go.mod h1:NN6mnm5/0k8jw4cBfCnJtr5L7ErOTg18tMNpgFkn0hA= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= -github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= -github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= -github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= 
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= -github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= -golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= -k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08= -k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= -k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= -k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= -k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI= -k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= -sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= +k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= +k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= +k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= +k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= +k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils 
v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= +sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/internal/controller/bucket/constants.go b/internal/controller/bucket/constants.go new file mode 100644 index 0000000..4feb057 --- /dev/null +++ b/internal/controller/bucket/constants.go @@ -0,0 +1,19 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bucket_controller + +const bucketFinalizer = "s3.onyxia.sh/finalizer" diff --git a/internal/controller/bucket/controller.go b/internal/controller/bucket/controller.go new file mode 100644 index 0000000..593f4e9 --- /dev/null +++ b/internal/controller/bucket/controller.go @@ -0,0 +1,65 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bucket_controller + +import ( + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets/finalizers,verbs=update + +// BucketReconciler reconciles a Bucket object +type BucketReconciler struct { + client.Client + Scheme *runtime.Scheme + ReconcilePeriod time.Duration + S3factory s3factory.S3Factory + ControllerHelper *helpers.ControllerHelper + S3Instancehelper *helpers.S3InstanceHelper +} + +// SetupWithManager sets up the controller with the Manager.* +func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.Bucket{}). + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Only reconcile if generation has changed + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} diff --git a/internal/controller/bucket/finalizer.go b/internal/controller/bucket/finalizer.go new file mode 100644 index 0000000..3517ac1 --- /dev/null +++ b/internal/controller/bucket/finalizer.go @@ -0,0 +1,112 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bucket_controller + +import ( + "context" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func (r *BucketReconciler) handleDeletion( + ctx context.Context, + req reconcile.Request, + bucketResource *s3v1alpha1.Bucket, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + if controllerutil.ContainsFinalizer(bucketResource, bucketFinalizer) { + + if err := r.finalizeBucket(ctx, bucketResource); err != nil { + logger.Error( + err, + "An error occurred while attempting to finalize the bucket", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.DeletionFailure, + "Bucket deletion has failed", + err, + ) + } + + if ok := controllerutil.RemoveFinalizer(bucketResource, bucketFinalizer); !ok { + logger.Info( + "Failed to remove finalizer for bucketResource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + // Let's re-fetch the S3Instance Custom Resource after removing the finalizer + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Update(ctx, bucketResource); err != nil { + logger.Error( + err, + "Failed to remove finalizer for bucketResource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + } + return ctrl.Result{}, nil +} + +func (r *BucketReconciler) finalizeBucket( + ctx context.Context, + bucketResource *s3v1alpha1.Bucket, +) error { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + bucketResource.Name, + bucketResource.Namespace, + bucketResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return err + } + if s3Client.GetConfig().BucketDeletionEnabled { + return s3Client.DeleteBucket(bucketResource.Spec.Name) + } + return nil +} diff --git a/internal/controller/bucket/finalizer_test.go b/internal/controller/bucket/finalizer_test.go new file mode 100644 index 0000000..5ccfb22 --- /dev/null +++ b/internal/controller/bucket/finalizer_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bucket_controller_test + +import ( + "context" + "testing" + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + bucket_controller "github.com/InseeFrLab/s3-operator/internal/controller/bucket" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleDelete(t *testing.T) { + + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + bucketResource := &s3v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-bucket", + Namespace: "default", + Generation: 1, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.BucketSpec{ + Name: "example-bucket", + S3InstanceRef: "s3-operator/default", + Quota: s3v1alpha1.Quota{Default: 10}, + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, bucketResource}) + + // Create the reconciler + reconciler := &bucket_controller.BucketReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: bucketResource.Name, Namespace: bucketResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("ressource have been deleted", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: bucketResource.Name, Namespace: bucketResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + policy := &s3v1alpha1.Bucket{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "example-bucket", + }, policy) + assert.NotNil(t, err) + assert.ErrorContains(t, err, "buckets.s3.onyxia.sh \"example-bucket\" not found") + }) + +} diff --git a/internal/controller/bucket/reconcile.go b/internal/controller/bucket/reconcile.go new file mode 100644 index 0000000..75e140c --- /dev/null +++ b/internal/controller/bucket/reconcile.go @@ -0,0 +1,488 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bucket_controller + +import ( + "context" + "fmt" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Checking for bucket resource existence + bucketResource := &s3v1alpha1.Bucket{} + err := r.Get(ctx, req.NamespacedName, bucketResource) + if err != nil { + if k8sapierrors.IsNotFound(err) { + logger.Info( + "The Bucket custom resource has been removed ; as such the Bucket controller is NOOP.", + "req.Name", + req.Name, + ) + return ctrl.Result{}, nil + } + logger.Error( + err, + "An error occurred when attempting to read the Bucket resource from the Kubernetes cluster", + ) + return ctrl.Result{}, err + } + + // Let's just set the status as Unknown when no status are available + if len(bucketResource.Status.Conditions) == 0 { + meta.SetStatusCondition( + &bucketResource.Status.Conditions, + metav1.Condition{ + Type: s3v1alpha1.ConditionReconciled, + Status: metav1.ConditionUnknown, + ObservedGeneration: bucketResource.Generation, + Reason: s3v1alpha1.Reconciling, + Message: "Starting reconciliation", + }, + ) + if err = r.Status().Update(ctx, bucketResource); err != nil { + logger.Error( + err, + "Failed to update bucketRessource status", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the bucketResource Custom Resource after update the status + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, bucketResource); err != nil { + logger.Error( + err, + "Failed to re-fetch bucketResource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(bucketResource, bucketFinalizer) { + logger.Info("Adding finalizer to bucket resource", "bucketName", + bucketResource.Spec.Name, "NamespacedName", req.NamespacedName.String()) + if ok := controllerutil.AddFinalizer(bucketResource, bucketFinalizer); !ok { + logger.Error( + err, + "Failed to add finalizer into bucket resource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + if err := r.Update(ctx, bucketResource); err != nil { + logger.Error( + err, + "An error occurred when adding finalizer on bucketResource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return 
ctrl.Result{}, err + } + + if err := r.Get(ctx, req.NamespacedName, bucketResource); err != nil { + logger.Error( + err, + "Failed to re-fetch bucketResource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Managing bucket deletion with a finalizer + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources + if bucketResource.GetDeletionTimestamp() != nil { + logger.Info("bucketResource has been marked for deletion", "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.handleDeletion(ctx, req, bucketResource) + } + + return r.handleReconciliation(ctx, req, bucketResource) + +} + +func (r *BucketReconciler) handleReconciliation( + ctx context.Context, + req reconcile.Request, + bucketResource *s3v1alpha1.Bucket, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + bucketResource.Name, + bucketResource.Namespace, + bucketResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // Bucket lifecycle management (other than deletion) starts here + // Check bucket existence on the S3 server + found, err := s3Client.BucketExists(bucketResource.Spec.Name) + if err != nil { + logger.Error( + err, + "An error occurred while checking the existence of a bucket", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + "Error while checking if bucket already exists", + err, + ) + + } + + // If the bucket does not exist, it is created based on the CR (with potential quotas and paths) + if !found { + return r.handleCreation(ctx, req, bucketResource) + } + + return r.handleUpdate(ctx, req, bucketResource) + +} + +func (r *BucketReconciler) handleUpdate( + ctx context.Context, + req reconcile.Request, + bucketResource *s3v1alpha1.Bucket, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + bucketResource.Name, + bucketResource.Namespace, + bucketResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error( + err, + "An error occurred while getting s3Client for bucket resource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // If the bucket exists on the S3 server, then we need to compare it to + // its corresponding custom resource, and update it in case the CR has changed.
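+	// Concretely, the update path corrects two kinds of drift, and never deletes anything
+	// that exists only on the server side:
+	//   - quota: when the effective quota differs from the CR, it is reset to
+	//     Spec.Quota.Override if that is set (non-zero), otherwise to Spec.Quota.Default.
+	//     For example, Quota{Default: 10, Override: 20} reconciles the bucket to 20,
+	//     while Quota{Default: 10} reconciles it to 10.
+	//   - paths: every path listed in the CR but missing from the bucket is recreated;
+	//     paths or objects that exist only on the S3 server are left untouched.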
+ + // Checking effectiveQuota existence on the bucket + effectiveQuota, err := s3Client.GetQuota(bucketResource.Spec.Name) + if err != nil { + logger.Error( + err, + "An error occurred while checking the quota for bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + "Checking quota has failed", + err, + ) + } + + // If a quota exists, we check it versus the spec of the CR. In case they don't match, + // we reset the quota using the value from CR ("override" is present, "default" if not) + + // Choosing between override / default + quotaToResetTo := bucketResource.Spec.Quota.Override + if quotaToResetTo == 0 { + quotaToResetTo = bucketResource.Spec.Quota.Default + } + + if effectiveQuota != quotaToResetTo { + err = s3Client.SetQuota(bucketResource.Spec.Name, quotaToResetTo) + if err != nil { + logger.Error( + err, + "An error occurred while resetting the quota for bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "The quota update (%v => %v) has failed", + effectiveQuota, + quotaToResetTo, + ), + err, + ) + } + } + + // For every path on the custom resource's spec, we check the path actually + // exists on the bucket on the S3 server, and create it if it doesn't + // TODO ? : the way this is naively implemented, it's probably costly. Maybe + // we can get the "effectiveBucket" (with its quota and paths) once at the beginning, + // and iterate on this instead of interrogating the S3 server twice for every path. + // But then again, some buckets will likely be filled with many objects outside the + // scope of the CR, so getting all of them might be even more costly. 
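+	// As a rough illustration of that alternative (a sketch only, relying on a hypothetical
+	// ListPaths helper that the current S3Client interface does not provide), the loop below
+	// could be replaced by a single listing followed by an in-memory diff:
+	//
+	//	existingPaths, err := s3Client.ListPaths(bucketResource.Spec.Name) // one S3 call
+	//	if err != nil { /* surface as s3v1alpha1.Unreachable, as below */ }
+	//	for _, pathInCr := range bucketResource.Spec.Paths {
+	//		if !slices.Contains(existingPaths, pathInCr) { // stdlib "slices" package
+	//			err = s3Client.CreatePath(bucketResource.Spec.Name, pathInCr)
+	//		}
+	//	}
+	//
+	// Whether that is cheaper depends on how many objects live outside the CR's scope.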
+ for _, pathInCr := range bucketResource.Spec.Paths { + pathExists, err := s3Client.PathExists(bucketResource.Spec.Name, pathInCr) + if err != nil { + logger.Error( + err, + "An error occurred while checking a path's existence for bucket ressource", + "path", + pathInCr, + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + fmt.Sprintf("The check for path [%s] in bucket has failed", pathInCr), + err, + ) + } + + if !pathExists { + err = s3Client.CreatePath(bucketResource.Spec.Name, pathInCr) + if err != nil { + logger.Error( + err, + "An error occurred while creating a path for bucket ressource", + "path", + pathInCr, + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + fmt.Sprintf("The creation of path [%s] in bucket has failed", pathInCr), + err, + ) + } + } + } + + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Reconciled, + "Bucket reconciled", + nil, + ) +} + +func (r *BucketReconciler) handleCreation( + ctx context.Context, + req reconcile.Request, + bucketResource *s3v1alpha1.Bucket, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + bucketResource.Name, + bucketResource.Namespace, + bucketResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error( + err, + "An error occurred while getting s3Client for bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // Bucket creation + err = s3Client.CreateBucket(bucketResource.Spec.Name) + if err != nil { + logger.Error( + err, + "An error occurred while creating bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.CreationFailure, + "An error occurred while creating bucket", + err, + ) + } + + // Setting quotas + err = s3Client.SetQuota(bucketResource.Spec.Name, bucketResource.Spec.Quota.Default) + if err != nil { + logger.Error( + err, + "An error occurred while setting quota for bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "Setting a quota of [%v] on bucket [%s] has failed", + bucketResource.Spec.Quota.Default, + bucketResource.Spec.Name, + ), + err, + ) + } + + // Path creation + for _, pathInCr := range bucketResource.Spec.Paths { + err = s3Client.CreatePath(bucketResource.Spec.Name, pathInCr) + if err != nil { + logger.Error( + err, + "An error occurred while creating path for bucket ressource", + "path", + pathInCr, + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + fmt.Sprintf("Creation for path [%s] in bucket has failed", pathInCr), + err, + ) + } + } + + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + 
s3v1alpha1.Reconciled, + "Bucket reconciled", + nil, + ) +} diff --git a/internal/controller/bucket/reconcile_test.go b/internal/controller/bucket/reconcile_test.go new file mode 100644 index 0000000..154b0f2 --- /dev/null +++ b/internal/controller/bucket/reconcile_test.go @@ -0,0 +1,140 @@ +/* +Copyright 2024 Mathieu Parent . + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bucket_controller_test + +import ( + "context" + "testing" + + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + bucket_controller "github.com/InseeFrLab/s3-operator/internal/controller/bucket" + "github.com/stretchr/testify/assert" +) + +func TestHandleCreate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + bucketResource := &s3v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.BucketSpec{ + Name: "test-bucket", + S3InstanceRef: "s3-operator/default", + Quota: s3v1alpha1.Quota{Default: 10}, + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "s3.onyxia.sh/v1alpha1", + Kind: "Bucket", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, bucketResource}) + + // Create the reconciler + reconciler := &bucket_controller.BucketReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: bucketResource.Name, Namespace: bucketResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} + +func TestHandleUpdate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + bucketResource := &s3v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-bucket", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.BucketSpec{ + Name: "existing-bucket", + Paths: []string{"example"}, + S3InstanceRef: "s3-operator/default", + Quota: s3v1alpha1.Quota{Default: 10}, + }, + } + + // Create a fake client with a sample CR + bucketInvalidResource := &s3v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-invalid-bucket", + Namespace: "default", + Generation: 1, + Finalizers: 
[]string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.BucketSpec{ + Name: "existing-invalid-bucket", + Paths: []string{"example", "non-existing"}, + S3InstanceRef: "s3-operator/default", + Quota: s3v1alpha1.Quota{Default: 100}}, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, bucketResource, bucketInvalidResource}) + + // Create the reconciler + reconciler := &bucket_controller.BucketReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: bucketResource.Name, Namespace: bucketResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: bucketInvalidResource.Name, Namespace: bucketInvalidResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} diff --git a/internal/controller/bucket/status.go b/internal/controller/bucket/status.go new file mode 100644 index 0000000..e4ff414 --- /dev/null +++ b/internal/controller/bucket/status.go @@ -0,0 +1,47 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bucket_controller + +import ( + "context" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + + ctrl "sigs.k8s.io/controller-runtime" +) + +func (r *BucketReconciler) SetReconciledCondition( + ctx context.Context, + req ctrl.Request, + bucketResource *s3v1alpha1.Bucket, + reason string, + message string, + err error, +) (ctrl.Result, error) { + return r.ControllerHelper.SetReconciledCondition( + ctx, + r.Status(), + req, + bucketResource, + &bucketResource.Status.Conditions, + s3v1alpha1.ConditionReconciled, + reason, + message, + err, + r.ReconcilePeriod, + ) +} diff --git a/internal/controller/path/constants.go b/internal/controller/path/constants.go new file mode 100644 index 0000000..7bce285 --- /dev/null +++ b/internal/controller/path/constants.go @@ -0,0 +1,19 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package path_controller + +const pathFinalizer = "s3.onyxia.sh/finalizer" diff --git a/internal/controller/path/controller.go b/internal/controller/path/controller.go new file mode 100644 index 0000000..8c80136 --- /dev/null +++ b/internal/controller/path/controller.go @@ -0,0 +1,65 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path_controller + +import ( + "time" + + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths/finalizers,verbs=update + +// PathReconciler reconciles a Path object +type PathReconciler struct { + client.Client + Scheme *runtime.Scheme + ReconcilePeriod time.Duration + S3factory s3factory.S3Factory + ControllerHelper *helpers.ControllerHelper + S3Instancehelper *helpers.S3InstanceHelper +} + +// SetupWithManager sets up the controller with the Manager. +func (r *PathReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.Path{}). + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Only reconcile if generation has changed + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} diff --git a/internal/controller/path/finalizer.go b/internal/controller/path/finalizer.go new file mode 100644 index 0000000..1f60c84 --- /dev/null +++ b/internal/controller/path/finalizer.go @@ -0,0 +1,142 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package path_controller + +import ( + "context" + "fmt" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *PathReconciler) handleDeletion( + ctx context.Context, + req reconcile.Request, + pathResource *s3v1alpha1.Path, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + if controllerutil.ContainsFinalizer(pathResource, pathFinalizer) { + if err := r.finalizePath(ctx, pathResource); err != nil { + logger.Error( + err, + "An error occurred when attempting to finalize the path", + "path", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.DeletionFailure, + "Path deletion has failed", + err, + ) + } + + // Remove pathFinalizer. Once all finalizers have been + // removed, the object will be deleted. + + if ok := controllerutil.RemoveFinalizer(pathResource, pathFinalizer); !ok { + logger.Info( + "Failed to remove finalizer for pathResource", + "pathResource", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + // Let's re-fetch the S3Instance Custom Resource after removing the finalizer + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Update(ctx, pathResource); err != nil { + logger.Error( + err, + "An error occurred when removing finalizer from pathResource", + "pathResource", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + } + return ctrl.Result{}, nil +} + +func (r *PathReconciler) finalizePath(ctx context.Context, pathResource *s3v1alpha1.Path) error { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + pathResource.Name, + pathResource.Namespace, + pathResource.Spec.S3InstanceRef, + ) + + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return err + } + + if s3Client.GetConfig().PathDeletionEnabled { + var failedPaths []string = make([]string, 0) + for _, path := range pathResource.Spec.Paths { + + pathExists, err := s3Client.PathExists(pathResource.Spec.BucketName, path) + if err != nil { + logger.Error( + err, + "finalize : an error occurred while checking a path's existence on a bucket", + "bucket", + pathResource.Spec.BucketName, + "path", + path, + ) + } + + if pathExists { + err = s3Client.DeletePath(pathResource.Spec.BucketName, path) + if err != nil { + failedPaths = append(failedPaths, path) + } + } + } + + if len(failedPaths) > 0 { + return fmt.Errorf( + "at least one path couldn't be removed from S3 backend %+q", + failedPaths, + ) + } + } + return nil +} diff --git a/internal/controller/path/finalizer_test.go b/internal/controller/path/finalizer_test.go new file mode 100644 index 0000000..6074cf3 --- /dev/null +++ b/internal/controller/path/finalizer_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path_controller_test + +import ( + "context" + "testing" + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + path_controller "github.com/InseeFrLab/s3-operator/internal/controller/path" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleDelete(t *testing.T) { + + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + pathResource := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-path", + Namespace: "default", + Generation: 1, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.PathSpec{ + S3InstanceRef: "s3-operator/default", + BucketName: "my-bucket", + Paths: []string{"path1"}, + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, pathResource}) + + // Create the reconciler + reconciler := &path_controller.PathReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathResource.Name, Namespace: pathResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("ressource have been deleted", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathResource.Name, Namespace: pathResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + path := &s3v1alpha1.Path{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "example-path", + }, path) + assert.NotNil(t, err) + assert.ErrorContains(t, err, "paths.s3.onyxia.sh \"example-path\" not found") + }) + +} diff --git a/internal/controller/path/reconcile.go b/internal/controller/path/reconcile.go new file mode 100644 index 0000000..e082e5f --- /dev/null +++ b/internal/controller/path/reconcile.go @@ -0,0 +1,292 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path_controller + +import ( + "context" + "fmt" + + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *PathReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Checking for path resource existence + pathResource := &s3v1alpha1.Path{} + err := r.Get(ctx, req.NamespacedName, pathResource) + if err != nil { + if k8sapierrors.IsNotFound(err) { + logger.Info( + "The Path custom resource has been removed ; as such the Path controller is NOOP.", + "req.Name", + req.Name, + ) + return ctrl.Result{}, nil + } + logger.Error( + err, + "An error occurred when attempting to read the Path resource from the Kubernetes cluster", + ) + return ctrl.Result{}, err + } + + // Let's just set the status as Unknown when no status are available + if len(pathResource.Status.Conditions) == 0 { + meta.SetStatusCondition( + &pathResource.Status.Conditions, + metav1.Condition{ + Type: s3v1alpha1.ConditionReconciled, + Status: metav1.ConditionUnknown, + ObservedGeneration: pathResource.Generation, + Reason: s3v1alpha1.Reconciling, + Message: "Starting reconciliation", + }, + ) + if err = r.Status().Update(ctx, pathResource); err != nil { + logger.Error( + err, + "Failed to update pathResource status", + "pathResourceName", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the s3InstanceResource Custom Resource after update the status + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, pathResource); err != nil { + logger.Error( + err, + "Failed to re-fetch pathResource", + "pathResourceName", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(pathResource, pathFinalizer) { + logger.Info("Adding finalizer to pathResource", "pathResourceName", + pathResource.Name, "NamespacedName", req.NamespacedName.String()) + if ok := controllerutil.AddFinalizer(pathResource, pathFinalizer); !ok { + logger.Error( + err, + "Failed to add finalizer into pathResource", + "pathResourceName", + pathResource.Name, 
"NamespacedName", req.NamespacedName.String()) + return ctrl.Result{Requeue: true}, nil + } + + if err := r.Update(ctx, pathResource); err != nil { + logger.Error( + err, + "an error occurred when adding finalizer on pathResource", + "pathResourceName", + pathResource.Name, "NamespacedName", req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + if err := r.Get(ctx, req.NamespacedName, pathResource); err != nil { + logger.Error( + err, + "Failed to re-fetch pathResource", + "pathResourceName", + pathResource.Name, "NamespacedName", req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Managing path deletion with a finalizer + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources + if pathResource.GetDeletionTimestamp() != nil { + logger.Info("pathResource have been marked for deletion", "pathResourceName", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.handleDeletion(ctx, req, pathResource) + } + + return r.handleReconciliation(ctx, req, pathResource) + +} + +func (r *PathReconciler) handleReconciliation( + ctx context.Context, + req reconcile.Request, + pathResource *s3v1alpha1.Path, +) (reconcile.Result, error) { + + logger := log.FromContext(ctx) + + // Create S3Client + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + pathResource.Name, + pathResource.Namespace, + pathResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // Path lifecycle management (other than deletion) starts here + + // Check bucket existence on the S3 server + bucketFound, err := s3Client.BucketExists(pathResource.Spec.BucketName) + if err != nil { + logger.Error( + err, + "an error occurred while checking the existence of a bucket", + "bucketName", + pathResource.Spec.BucketName, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.Unreachable, + "Error while checking if bucket already exist", + err, + ) + } + + // If bucket does not exist, the Path CR should be in a failing state + if !bucketFound { + errorBucketNotFound := fmt.Errorf( + "the path CR %s references a non-existing bucket : %s", + pathResource.Name, + pathResource.Spec.BucketName, + ) + logger.Error(errorBucketNotFound, errorBucketNotFound.Error(), "pathResourceName", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.CreationFailure, + fmt.Sprintf( + "The Path CR [%s] references a non-existing bucket [%s]", + pathResource.Name, + pathResource.Spec.BucketName, + ), + err, + ) + } + + // If the bucket exists, proceed to create or recreate the referenced paths + // For every path on the custom resource's spec, we check the path actually + // exists on the bucket on the S3 server, and create it if it doesn't + // TODO ? : the way this is naively implemented, it's probably costly. Maybe + // we can get the "effectiveBucket" (with its quota and paths) once at the beginning, + // and iterate on this instead of interrogating the S3 server twice for every path. 
+ // But then again, some buckets will likely be filled with many objects outside the + // scope of the CR, so getting all of them might be even more costly. + for _, pathInCr := range pathResource.Spec.Paths { + pathExists, err := s3Client.PathExists(pathResource.Spec.BucketName, pathInCr) + if err != nil { + logger.Error( + err, + "An error occurred while checking a path's existence for bucket ressource", + "path", + pathInCr, + "bucketName", + pathResource.Spec.BucketName, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.Unreachable, + fmt.Sprintf("The check for path [%s] in bucket has failed", pathInCr), + err, + ) + } + + if !pathExists { + err = s3Client.CreatePath(pathResource.Spec.BucketName, pathInCr) + if err != nil { + logger.Error( + err, + "An error occurred while creating a path for bucket ressource", + "path", + pathInCr, + "bucketName", + pathResource.Spec.BucketName, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.Unreachable, + fmt.Sprintf("The creation of path [%s] in bucket has failed", pathInCr), + err, + ) + } + } + } + + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.Reconciled, + "Path reconciled", + nil, + ) +} diff --git a/internal/controller/path/reconcile_test.go b/internal/controller/path/reconcile_test.go new file mode 100644 index 0000000..52b5165 --- /dev/null +++ b/internal/controller/path/reconcile_test.go @@ -0,0 +1,156 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package path_controller_test + +import ( + "context" + "testing" + + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + path_controller "github.com/InseeFrLab/s3-operator/internal/controller/path" + "github.com/stretchr/testify/assert" +) + +func TestHandleCreate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + pathResource := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-path", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.PathSpec{ + BucketName: "existing-bucket", + Paths: []string{"mypath"}, + S3InstanceRef: "s3-operator/default", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, pathResource}) + + // Create the reconciler + reconciler := &path_controller.PathReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathResource.Name, Namespace: pathResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} + +func TestHandleUpdate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + pathResource := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-path", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.PathSpec{ + BucketName: "existing-bucket", + S3InstanceRef: "s3-operator/default", + Paths: []string{"example"}, + }, + } + + // Create a fake client with a sample CR + pathInvalidResource := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-invalid-paths", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.PathSpec{ + BucketName: "existing-invalid-bucket", + S3InstanceRef: "s3-operator/default", + Paths: []string{"example", "non-existing"}, + }, + } + + pathInvalidResource2 := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-invalid-paths2", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.PathSpec{ + BucketName: "non-existing-bucket", + S3InstanceRef: "s3-operator/default", + Paths: []string{"example", "non-existing"}, + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, pathResource, pathInvalidResource, pathInvalidResource2}) + + // Create the reconciler + reconciler := &path_controller.PathReconciler{ + Client: testUtils.Client, + Scheme: 
testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathResource.Name, Namespace: pathResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("no error on invalid resource", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathInvalidResource.Name, Namespace: pathInvalidResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("failed create path on non existing bucket", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathInvalidResource2.Name, Namespace: pathInvalidResource2.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} diff --git a/internal/controller/path/status.go b/internal/controller/path/status.go new file mode 100644 index 0000000..2456a8c --- /dev/null +++ b/internal/controller/path/status.go @@ -0,0 +1,48 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path_controller + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *PathReconciler) SetReconciledCondition( + ctx context.Context, + req ctrl.Request, + pathResource *s3v1alpha1.Path, + reason string, + message string, + err error, +) (ctrl.Result, error) { + return r.ControllerHelper.SetReconciledCondition( + ctx, + r.Status(), + req, + pathResource, + &pathResource.Status.Conditions, + s3v1alpha1.ConditionReconciled, + reason, + message, + err, + r.ReconcilePeriod, + ) + +} diff --git a/internal/controller/policy/constants.go b/internal/controller/policy/constants.go new file mode 100644 index 0000000..397d2f6 --- /dev/null +++ b/internal/controller/policy/constants.go @@ -0,0 +1,19 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller + +const policyFinalizer = "s3.onyxia.sh/finalizer" diff --git a/internal/controller/policy/controller.go b/internal/controller/policy/controller.go new file mode 100644 index 0000000..01be910 --- /dev/null +++ b/internal/controller/policy/controller.go @@ -0,0 +1,65 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller + +import ( + "time" + + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies/finalizers,verbs=update + +// PolicyReconciler reconciles a Policy object +type PolicyReconciler struct { + client.Client + Scheme *runtime.Scheme + ReconcilePeriod time.Duration + S3factory s3factory.S3Factory + ControllerHelper *helpers.ControllerHelper + S3Instancehelper *helpers.S3InstanceHelper +} + +// SetupWithManager sets up the controller with the Manager. +func (r *PolicyReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.Policy{}). + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Only reconcile if generation has changed + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} diff --git a/internal/controller/policy/finalizer.go b/internal/controller/policy/finalizer.go new file mode 100644 index 0000000..dd9d015 --- /dev/null +++ b/internal/controller/policy/finalizer.go @@ -0,0 +1,131 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package policy_controller + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/minio/madmin-go/v3" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *PolicyReconciler) finalizePolicy( + ctx context.Context, + policyResource *s3v1alpha1.Policy, +) error { + logger := log.FromContext(ctx) + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + policyResource.Name, + policyResource.Namespace, + policyResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return err + } + if s3Client.GetConfig().PolicyDeletionEnabled { + return s3Client.DeletePolicy(policyResource.Spec.Name) + } + return nil +} + +func (r *PolicyReconciler) handleDeletion( + ctx context.Context, + req reconcile.Request, + policyResource *s3v1alpha1.Policy, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + if controllerutil.ContainsFinalizer(policyResource, policyFinalizer) { + // Run finalization logic for policyFinalizer. If the + // finalization logic fails, don't remove the finalizer so + // that we can retry during the next reconciliation. + if err := r.finalizePolicy(ctx, policyResource); err != nil { + logger.Error( + err, + "An error occurred when attempting to finalize the policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.DeletionFailure, + "Policy deletion has failed", + err, + ) + } + + if ok := controllerutil.RemoveFinalizer(policyResource, policyFinalizer); !ok { + logger.Info( + "Failed to remove finalizer for policyResource", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + if err := r.Update(ctx, policyResource); err != nil { + logger.Error( + err, + "an error occurred when removing finalizer from policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil +} + +func (r *PolicyReconciler) isPolicyMatchingWithCustomResource( + policyResource *s3v1alpha1.Policy, + effectivePolicy *madmin.PolicyInfo, +) (bool, error) { + // The policy content visible in the custom resource usually contains indentations and newlines + // while the one we get from S3 is compacted. In order to compare them, we compact the former. + policyResourceAsByteSlice := []byte(policyResource.Spec.PolicyContent) + buffer := new(bytes.Buffer) + err := json.Compact(buffer, policyResourceAsByteSlice) + if err != nil { + return false, err + } + + // Another gotcha is that the effective policy comes up as a json.RawContent, + // which needs marshalling in order to be properly compared to the []byte we get from the CR. + marshalled, err := json.Marshal(effectivePolicy.Policy) + if err != nil { + return false, err + } + return bytes.Equal(buffer.Bytes(), marshalled), nil +} diff --git a/internal/controller/policy/finalizer_test.go b/internal/controller/policy/finalizer_test.go new file mode 100644 index 0000000..4f4a24f --- /dev/null +++ b/internal/controller/policy/finalizer_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller_test + +import ( + "context" + "testing" + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + policy_controller "github.com/InseeFrLab/s3-operator/internal/controller/policy" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleDelete(t *testing.T) { + + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + policyResource := &s3v1alpha1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-policy", + Namespace: "default", + Generation: 1, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.PolicySpec{ + Name: "example-policy", + S3InstanceRef: "s3-operator/default", + PolicyContent: "", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, policyResource}) + + // Create the reconciler + reconciler := &policy_controller.PolicyReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: policyResource.Name, Namespace: policyResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("ressource have been deleted", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: policyResource.Name, Namespace: policyResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + policy := &s3v1alpha1.Policy{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "example-policy", + }, policy) + assert.NotNil(t, err) + assert.ErrorContains(t, err, "policies.s3.onyxia.sh \"example-policy\" not found") + }) + +} diff --git a/internal/controller/policy/reconcile.go b/internal/controller/policy/reconcile.go new file mode 100644 index 0000000..678f06f --- /dev/null +++ b/internal/controller/policy/reconcile.go @@ -0,0 +1,389 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller + +import ( + "context" + "fmt" + + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *PolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Checking for policy resource existence + policyResource := &s3v1alpha1.Policy{} + err := r.Get(ctx, req.NamespacedName, policyResource) + if err != nil { + if k8sapierrors.IsNotFound(err) { + logger.Info( + "The Policy custom resource has been removed; as such the Policy controller is NOOP.", + "req.Name", + req.Name, + ) + return ctrl.Result{}, nil + } + logger.Error( + err, + "An error occurred when attempting to read the Policy resource from the Kubernetes cluster", + ) + return ctrl.Result{}, err + } + + // Let's just set the status as Unknown when no status is available + if len(policyResource.Status.Conditions) == 0 { + meta.SetStatusCondition( + &policyResource.Status.Conditions, + metav1.Condition{ + Type: s3v1alpha1.ConditionReconciled, + Status: metav1.ConditionUnknown, + ObservedGeneration: policyResource.Generation, + Reason: s3v1alpha1.Reconciling, + Message: "Starting reconciliation", + }, + ) + if err = r.Status().Update(ctx, policyResource); err != nil { + logger.Error( + err, + "Failed to update policyResource status", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the policyResource Custom Resource after updating the status + // so that we have the latest state of the resource on the cluster and we will avoid + // raising the error "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, policyResource); err != nil { + logger.Error( + err, + "Failed to re-fetch policyResource", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(policyResource, policyFinalizer) { + logger.Info("Adding finalizer to policy resource", "PolicyName", + policyResource.Spec.Name, "NamespacedName", req.NamespacedName.String()) + if ok := controllerutil.AddFinalizer(policyResource, policyFinalizer); !ok { + logger.Error( + err, + "Failed to add finalizer into policy resource", + 
"policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + err = r.Update(ctx, policyResource) + if err != nil { + logger.Error( + err, + "An error occurred when adding finalizer from policyResource", + "policyResource", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the policy Custom Resource after adding the finalizer + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, policyResource); err != nil { + logger.Error( + err, + "Failed to re-fetch policyResource", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Managing policy deletion with a finalizer + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources + if policyResource.GetDeletionTimestamp() != nil { + logger.Info("policyResource have been marked for deletion", "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.handleDeletion(ctx, req, policyResource) + } + + // Policy lifecycle management (other than deletion) starts here + return r.handleReconciliation(ctx, req, policyResource) + +} + +func (r *PolicyReconciler) handleReconciliation( + ctx context.Context, + req reconcile.Request, + policyResource *s3v1alpha1.Policy, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + policyResource.Name, + policyResource.Namespace, + policyResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // Check policy existence on the S3 server + effectivePolicy, err := s3Client.GetPolicyInfo(policyResource.Spec.Name) + if err != nil { + logger.Error( + err, + "An error occurred while checking the existence of a policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + "Error while checking if policy already exist", + err, + ) + } + + if effectivePolicy == nil { + return r.handleCreation(ctx, req, policyResource) + } + + // If the policy exists on S3, we compare its state to the custom resource that spawned it on K8S + return r.handleUpdate(ctx, req, policyResource) +} + +func (r *PolicyReconciler) handleUpdate( + ctx context.Context, + req reconcile.Request, + policyResource *s3v1alpha1.Policy, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + policyResource.Name, + policyResource.Namespace, + policyResource.Spec.S3InstanceRef, + ) + + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + 
"Failed to generate s3client from instance", + err, + ) + } + + // Check policy existence on the S3 server + effectivePolicy, err := s3Client.GetPolicyInfo(policyResource.Spec.Name) + if err != nil { + logger.Error( + err, + "An error occurred while checking the existence of a policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + "Error while checking if policy already exist", + err, + ) + } + + matching, err := r.isPolicyMatchingWithCustomResource(policyResource, effectivePolicy) + if err != nil { + logger.Error( + err, + "An error occurred while comparing actual and expected configuration for the policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "The comparison between the effective policy [%s] on S3 and its corresponding custom resource on K8S has failed", + policyResource.Spec.Name, + ), + err, + ) + } + + if !matching { + // If not we update the policy to match the CR + err = s3Client.CreateOrUpdatePolicy( + policyResource.Spec.Name, + policyResource.Spec.PolicyContent, + ) + if err != nil { + logger.Error( + err, + "An error occurred while updating the policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "The comparison between the effective policy [%s] on S3 and its corresponding custom resource on K8S has failed", + policyResource.Spec.Name, + ), + err, + ) + } + } + + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Reconciled, + "Policy reconciled", + nil, + ) +} + +func (r *PolicyReconciler) handleCreation(ctx context.Context, req reconcile.Request, + policyResource *s3v1alpha1.Policy) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + policyResource.Name, + policyResource.Namespace, + policyResource.Spec.S3InstanceRef, + ) + + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + err = s3Client.CreateOrUpdatePolicy( + policyResource.Spec.Name, + policyResource.Spec.PolicyContent, + ) + + if err != nil { + logger.Error( + err, + "An error occurred while creating the policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + "Error while creating policy", + err, + ) + } + + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Reconciled, + "Policy reconciled", + err, + ) +} diff --git a/internal/controller/policy/reconcile_test.go b/internal/controller/policy/reconcile_test.go new file mode 100644 index 0000000..1b363d2 --- /dev/null +++ b/internal/controller/policy/reconcile_test.go @@ -0,0 +1,169 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller_test + +import ( + "context" + "testing" + + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + policy_controller "github.com/InseeFrLab/s3-operator/internal/controller/policy" + "github.com/stretchr/testify/assert" +) + +func TestHandleCreate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + policyResource := &s3v1alpha1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-policy", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.PolicySpec{ + Name: "example-policy", + S3InstanceRef: "s3-operator/default", + PolicyContent: "", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, policyResource}) + + // Create the reconciler + reconciler := &policy_controller.PolicyReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: policyResource.Name, Namespace: policyResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} + +func TestHandleUpdate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + existingValidPolicy := []byte(`{ +"Version": "2012-10-17", +"Statement": [ +{ + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Resource": "arn:aws:s3:::my-bucket/*" +} +] +}`) + + existingInvalidPolicy := []byte(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Resource": "arn:aws:s3:::my-bucket2/*" + } + ] + }`) + + // Create a fake client with a sample CR + policyResource := &s3v1alpha1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-policy", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.PolicySpec{ + Name: "existing-policy", + S3InstanceRef: "s3-operator/default", + PolicyContent: string(existingValidPolicy), + }, + } + + // Create a fake client with a sample CR + policyInvalidResource := &s3v1alpha1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-invalid-policy", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.PolicySpec{ + Name: "existing-policy", + S3InstanceRef: "s3-operator/default", + PolicyContent: 
string(existingInvalidPolicy), + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, policyResource, policyInvalidResource}) + + // Create the reconciler + reconciler := &policy_controller.PolicyReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: policyResource.Name, Namespace: policyResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: policyInvalidResource.Name, Namespace: policyInvalidResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} diff --git a/internal/controller/policy/status.go b/internal/controller/policy/status.go new file mode 100644 index 0000000..ba780b7 --- /dev/null +++ b/internal/controller/policy/status.go @@ -0,0 +1,47 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *PolicyReconciler) SetReconciledCondition( + ctx context.Context, + req ctrl.Request, + policyResource *s3v1alpha1.Policy, + reason string, + message string, + err error, +) (ctrl.Result, error) { + return r.ControllerHelper.SetReconciledCondition( + ctx, + r.Status(), + req, + policyResource, + &policyResource.Status.Conditions, + s3v1alpha1.ConditionReconciled, + reason, + message, + err, + r.ReconcilePeriod, + ) +} diff --git a/internal/controller/s3instance/constants.go b/internal/controller/s3instance/constants.go new file mode 100644 index 0000000..8df1ac6 --- /dev/null +++ b/internal/controller/s3instance/constants.go @@ -0,0 +1,21 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3instance_controller + +const ( + s3InstanceFinalizer = "s3.onyxia.sh/finalizer" +) diff --git a/internal/controller/s3instance/controller.go b/internal/controller/s3instance/controller.go new file mode 100644 index 0000000..bbd8fc2 --- /dev/null +++ b/internal/controller/s3instance/controller.go @@ -0,0 +1,66 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3instance_controller + +import ( + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3instances,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3instances/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3instances/finalizers,verbs=update + +// S3InstanceReconciler reconciles a S3Instance object +type S3InstanceReconciler struct { + client.Client + Scheme *runtime.Scheme + ReconcilePeriod time.Duration + S3factory s3factory.S3Factory + ControllerHelper *helpers.ControllerHelper + S3Instancehelper *helpers.S3InstanceHelper +} + +// SetupWithManager sets up the controller with the Manager.* +func (r *S3InstanceReconciler) SetupWithManager(mgr ctrl.Manager) error { + // filterLogger := ctrl.Log.WithName("filterEvt") + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.S3Instance{}). + // See : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + // Ignore updates to CR status in which case metadata.Generation does not change, + // unless it is a change to the underlying Secret + UpdateFunc: func(e event.UpdateEvent) bool { + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} diff --git a/internal/controller/s3instance/finalizer.go b/internal/controller/s3instance/finalizer.go new file mode 100644 index 0000000..2a5dae4 --- /dev/null +++ b/internal/controller/s3instance/finalizer.go @@ -0,0 +1,138 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3instance_controller + +import ( + "context" + "fmt" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *S3InstanceReconciler) handleS3InstanceDeletion( + ctx context.Context, + req ctrl.Request, + s3InstanceResource *s3v1alpha1.S3Instance, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + if controllerutil.ContainsFinalizer(s3InstanceResource, s3InstanceFinalizer) { + logger.Info( + "Performing Finalizer Operations for S3Instance before deleting CR", + "Namespace", + s3InstanceResource.GetNamespace(), + "Name", + s3InstanceResource.GetName(), + ) + + // Check for existing references to this instance before allowing deletion + if err := r.checkS3InstanceReferences(ctx, s3InstanceResource); err != nil { + return ctrl.Result{}, err + } + + // Remove s3InstanceFinalizer. Once all finalizers have been removed, the object will be deleted. + if ok := controllerutil.RemoveFinalizer(s3InstanceResource, s3InstanceFinalizer); !ok { + logger.Info( + "Failed to remove finalizer for S3Instance", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + // Persist the removal of the finalizer on the cluster; once every finalizer is gone, + // the API server will proceed with deleting the object + if err := r.Update(ctx, s3InstanceResource); err != nil { + logger.Error( + err, + "Failed to remove finalizer for S3Instance", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil +} + +// checkS3InstanceReferences checks whether the S3 instance is still referenced by other resources +func (r *S3InstanceReconciler) checkS3InstanceReferences(ctx context.Context, s3Instance *s3v1alpha1.S3Instance) error { + // Resource types to check for remaining references + references := map[string]client.ObjectList{ + "Buckets": &s3v1alpha1.BucketList{}, + "Policies": &s3v1alpha1.PolicyList{}, + "Paths": &s3v1alpha1.PathList{}, + "S3Users": &s3v1alpha1.S3UserList{}, + } + + for name, list := range references { + if err := r.List(ctx, list); err != nil { + return fmt.Errorf("échec de la récupération des %s : %w", name, err) + } + + if found := r.countReferences(list, s3Instance); found > 0 { + return fmt.Errorf("impossible de supprimer S3Instance, %d %s utilisent cette instance", found, name) + } + } + return nil +} + +// countReferences counts the objects referencing the given S3Instance +func (r *S3InstanceReconciler) countReferences(list client.ObjectList, s3Instance *s3v1alpha1.S3Instance) int { + count := 0 + switch objects := list.(type) { + case *s3v1alpha1.BucketList: + for _, obj := range objects.Items { + if 
r.S3Instancehelper.GetS3InstanceRefInfo(obj.Spec.S3InstanceRef, obj.Namespace). + String() == fmt.Sprintf("%s/%s", s3Instance.Namespace, s3Instance.Name) { + count++ + } + } + case *s3v1alpha1.PathList: + for _, obj := range objects.Items { + if r.S3Instancehelper.GetS3InstanceRefInfo(obj.Spec.S3InstanceRef, obj.Namespace). + String() == fmt.Sprintf("%s/%s", s3Instance.Namespace, s3Instance.Name) { + count++ + } + } + case *s3v1alpha1.S3UserList: + for _, obj := range objects.Items { + if r.S3Instancehelper.GetS3InstanceRefInfo(obj.Spec.S3InstanceRef, obj.Namespace). + String() == fmt.Sprintf("%s/%s", s3Instance.Namespace, s3Instance.Name) { + count++ + } + } + case *s3v1alpha1.PolicyList: + for _, obj := range objects.Items { + if r.S3Instancehelper.GetS3InstanceRefInfo(obj.Spec.S3InstanceRef, obj.Namespace). + String() == fmt.Sprintf("%s/%s", s3Instance.Namespace, s3Instance.Name) { + count++ + } + } + } + + return count +} diff --git a/internal/controller/s3instance/finalizer_test.go b/internal/controller/s3instance/finalizer_test.go new file mode 100644 index 0000000..3ef63d0 --- /dev/null +++ b/internal/controller/s3instance/finalizer_test.go @@ -0,0 +1,335 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3instance_controller_test + +import ( + "context" + "testing" + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + s3instance_controller "github.com/InseeFrLab/s3-operator/internal/controller/s3instance" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleDelete(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + t.Run("no error", func(t *testing.T) { + s3instanceResource := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + } + + secretResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + 
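+        // Deleting an S3Instance only lists referencing resources and removes the finalizer through
+        // the Kubernetes client; no S3 call is made in this scenario, so the mocked factory above is
+        // only needed to construct the reconciler.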
testUtils.SetupClient([]client.Object{s3instanceResource, secretResource}) + + // Create the reconciler + reconciler := &s3instance_controller.S3InstanceReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("error if one bucket ressource still use it", func(t *testing.T) { + s3instanceResource := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + } + + secretResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + bucketResource := &s3v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "bucket", + Namespace: "default", + }, + Spec: s3v1alpha1.BucketSpec{ + Name: "bucket", + S3InstanceRef: "s3-operator/default", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, bucketResource}) + + // Create the reconciler + reconciler := &s3instance_controller.S3InstanceReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.Error(t, err) + assert.EqualErrorf(t, err, "impossible de supprimer S3Instance, 1 Buckets utilisent cette instance", err.Error()) + }) + + t.Run("error if one policy ressource still use it", func(t *testing.T) { + s3instanceResource := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + } + + secretResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + policyResource := &s3v1alpha1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "policy", + Namespace: "default", + }, + Spec: s3v1alpha1.PolicySpec{ + S3InstanceRef: "s3-operator/default", + }, + } + + 
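+        // checkS3InstanceReferences treats any resource whose S3InstanceRef resolves to the
+        // instance's "<namespace>/<name>" as a blocking reference, so the Policy above
+        // ("s3-operator/default") is expected to make the deletion fail with the error asserted below.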
// Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, policyResource}) + + // Create the reconciler + reconciler := &s3instance_controller.S3InstanceReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.Error(t, err) + assert.EqualErrorf(t, err, "impossible de supprimer S3Instance, 1 Policies utilisent cette instance", err.Error()) + }) + + t.Run("error if one path ressource still use it", func(t *testing.T) { + s3instanceResource := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + } + + secretResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + pathResource := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "path", + Namespace: "default", + }, + Spec: s3v1alpha1.PathSpec{ + S3InstanceRef: "s3-operator/default", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, pathResource}) + + // Create the reconciler + reconciler := &s3instance_controller.S3InstanceReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.Error(t, err) + assert.EqualErrorf(t, err, "impossible de supprimer S3Instance, 1 Paths utilisent cette instance", err.Error()) + }) + + t.Run("error if one user ressource still use it", func(t *testing.T) { + s3instanceResource := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + } + + secretResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": 
"secret_key_value", + }, + } + + userResource := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "user", + Namespace: "default", + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, userResource}) + + // Create the reconciler + reconciler := &s3instance_controller.S3InstanceReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.Error(t, err) + assert.EqualErrorf(t, err, "impossible de supprimer S3Instance, 1 S3Users utilisent cette instance", err.Error()) + }) +} diff --git a/internal/controller/s3instance/reconcile.go b/internal/controller/s3instance/reconcile.go new file mode 100644 index 0000000..548cf3e --- /dev/null +++ b/internal/controller/s3instance/reconcile.go @@ -0,0 +1,201 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3instance_controller + +import ( + "context" + "fmt" + + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *S3InstanceReconciler) Reconcile( + ctx context.Context, + req ctrl.Request, +) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Checking for s3InstanceResource existence + s3InstanceResource := &s3v1alpha1.S3Instance{} + err := r.Get(ctx, req.NamespacedName, s3InstanceResource) + if err != nil { + if k8sapierrors.IsNotFound(err) { + logger.Info( + fmt.Sprintf("The S3InstanceResource CR %s has been removed. 
NOOP", req.Name), + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, nil + } + logger.Error( + err, + "Failed to get S3InstanceResource", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's just set the status as Unknown when no status are available + if len(s3InstanceResource.Status.Conditions) == 0 { + meta.SetStatusCondition( + &s3InstanceResource.Status.Conditions, + metav1.Condition{ + Type: s3v1alpha1.ConditionReconciled, + Status: metav1.ConditionUnknown, + ObservedGeneration: s3InstanceResource.Generation, + Reason: s3v1alpha1.Reconciling, + Message: "Starting reconciliation", + }, + ) + if err = r.Status().Update(ctx, s3InstanceResource); err != nil { + logger.Error( + err, + "Failed to update s3InstanceResource status", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the s3InstanceResource Custom Resource after update the status + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, s3InstanceResource); err != nil { + logger.Error( + err, + "Failed to re-fetch s3Instance", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(s3InstanceResource, s3InstanceFinalizer) { + logger.Info("Adding finalizer to s3Instance", "NamespacedName", req.NamespacedName.String()) + if ok := controllerutil.AddFinalizer(s3InstanceResource, s3InstanceFinalizer); !ok { + logger.Error( + err, + "Failed to add finalizer into the s3Instance", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + if err = r.Update(ctx, s3InstanceResource); err != nil { + logger.Error( + err, + "an error occurred when adding finalizer on s3Instance", + "s3Instance", + s3InstanceResource.Name, + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the S3Instance Custom Resource after adding the finalizer + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, s3InstanceResource); err != nil { + logger.Error( + err, + "Failed to re-fetch s3Instance", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + } + + // Check if the s3InstanceResource instance is marked to be deleted, which is + // indicated by the deletion timestamp being set. The object will be deleted. 
+ if s3InstanceResource.GetDeletionTimestamp() != nil { + logger.Info("s3InstanceResource have been marked for deletion") + return r.handleS3InstanceDeletion(ctx, req, s3InstanceResource) + } + + // Reconciliation starts here + return r.handleReconciliation(ctx, req, s3InstanceResource) + +} + +func (r *S3InstanceReconciler) handleReconciliation( + ctx context.Context, + req reconcile.Request, + s3InstanceResource *s3v1alpha1.S3Instance, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientFromS3Instance(ctx, r.Client, r.S3factory, s3InstanceResource) + + if err != nil { + logger.Error( + err, + "Could not generate s3Instance", + "s3InstanceSecretRefName", + s3InstanceResource.Spec.SecretRef, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition(ctx, req, s3InstanceResource, s3v1alpha1.Unreachable, + "Failed to generate S3Instance ", err) + } + + _, err = s3Client.ListBuckets() + if err != nil { + logger.Error( + err, + "Could not generate s3Instance", + "s3InstanceName", + s3InstanceResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition(ctx, req, s3InstanceResource, s3v1alpha1.CreationFailure, + "Failed to generate S3Instance ", err) + } + + return r.SetReconciledCondition( + ctx, + req, + s3InstanceResource, + s3v1alpha1.Reconciled, + "S3Instance instance reconciled", + nil, + ) + +} diff --git a/internal/controller/s3instance/reconcile_test.go b/internal/controller/s3instance/reconcile_test.go new file mode 100644 index 0000000..94ee103 --- /dev/null +++ b/internal/controller/s3instance/reconcile_test.go @@ -0,0 +1,156 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3instance_controller_test + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + s3instance_controller "github.com/InseeFrLab/s3-operator/internal/controller/s3instance" + "github.com/stretchr/testify/assert" +) + +func TestHandleCreate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + s3instanceResourceInvalid := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.invalid.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-instance", + Namespace: "s3-operator", + }, + } + + s3instanceResource := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + }, + } + + secretResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3instanceResourceInvalid}) + + // Create the reconciler + reconciler := &s3instance_controller.S3InstanceReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("finalizer is added", func(t *testing.T) { + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + + // FetchReconciledInstance + reconciledInstance := &s3v1alpha1.S3Instance{} + _ = testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "s3-operator", + Name: "default", + }, reconciledInstance) + + assert.Equal(t, "s3.onyxia.sh/finalizer", reconciledInstance.Finalizers[0]) + }) + + t.Run("status is reconciled", func(t *testing.T) { + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + + 
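+        // Reconcile persists its changes through the fake client, so re-reading the S3Instance
+        // below should show the finalizer that was added during reconciliation.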
// FetchReconciledInstance + reconciledInstance := &s3v1alpha1.S3Instance{} + _ = testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "s3-operator", + Name: "default", + }, reconciledInstance) + + assert.Equal(t, "Reconciled", reconciledInstance.Status.Conditions[0].Reason) + }) + + t.Run("reason is creation failure because of invalid client", func(t *testing.T) { + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResourceInvalid.Name, Namespace: s3instanceResourceInvalid.Namespace}} + reconciler.Reconcile(context.TODO(), req) + + // 4️⃣ FetchReconciledInstance + reconciledInstance := &s3v1alpha1.S3Instance{} + _ = testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "s3-operator", + Name: "invalid-instance", + }, reconciledInstance) + + assert.Equal(t, "CreationFailure", reconciledInstance.Status.Conditions[0].Reason) + }) +} diff --git a/internal/controller/s3instance/status.go b/internal/controller/s3instance/status.go new file mode 100644 index 0000000..ad82e68 --- /dev/null +++ b/internal/controller/s3instance/status.go @@ -0,0 +1,47 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3instance_controller + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *S3InstanceReconciler) SetReconciledCondition( + ctx context.Context, + req ctrl.Request, + s3InstanceResource *s3v1alpha1.S3Instance, + reason string, + message string, + err error, +) (ctrl.Result, error) { + return r.ControllerHelper.SetReconciledCondition( + ctx, + r.Status(), + req, + s3InstanceResource, + &s3InstanceResource.Status.Conditions, + s3v1alpha1.ConditionReconciled, + reason, + message, + err, + r.ReconcilePeriod, + ) +} diff --git a/controllers/suite_test.go b/internal/controller/suite_test.go similarity index 73% rename from controllers/suite_test.go rename to internal/controller/suite_test.go index a4ae390..53390d2 100644 --- a/controllers/suite_test.go +++ b/internal/controller/suite_test.go @@ -14,10 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package controller import ( + "fmt" "path/filepath" + "runtime" "testing" . "github.com/onsi/ginkgo/v2" @@ -41,7 +43,7 @@ var cfg *rest.Config var k8sClient client.Client var testEnv *envtest.Environment -func TestAPIs(t *testing.T) { +func TestControllers(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") @@ -52,8 +54,16 @@ var _ = BeforeSuite(func() { By("bootstrapping test environment") testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, ErrorIfCRDPathMissing: true, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. 
If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", + fmt.Sprintf("1.29.0-%s-%s", runtime.GOOS, runtime.GOARCH)), } var err error diff --git a/internal/controller/user/constants.go b/internal/controller/user/constants.go new file mode 100644 index 0000000..650d1d0 --- /dev/null +++ b/internal/controller/user/constants.go @@ -0,0 +1,21 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user_controller + +const ( + userFinalizer = "s3.onyxia.sh/userFinalizer" +) diff --git a/internal/controller/user/controller.go b/internal/controller/user/controller.go new file mode 100644 index 0000000..717ceb4 --- /dev/null +++ b/internal/controller/user/controller.go @@ -0,0 +1,93 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package user_controller + +import ( + "time" + + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3users,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3users/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3users/finalizers,verbs=update +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete +// +kubebuilder:rbac:groups="",resources=secrets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups="",resources=secrets/finalizers,verbs=update + +// S3UserReconciler reconciles a S3User object +type S3UserReconciler struct { + client.Client + Scheme *runtime.Scheme + OverrideExistingSecret bool + ReconcilePeriod time.Duration + S3factory s3factory.S3Factory + ControllerHelper *helpers.ControllerHelper + S3Instancehelper *helpers.S3InstanceHelper + PasswordGeneratorHelper *helpers.PasswordGenerator +} + +// SetupWithManager sets up the controller with the Manager.* +func (r *S3UserReconciler) SetupWithManager(mgr ctrl.Manager) error { + // filterLogger := ctrl.Log.WithName("filterEvt") + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.S3User{}). + // The "secret owning" implies the reconcile loop will be called whenever a Secret owned + // by a S3User is created/updated/deleted. In other words, even when creating a single S3User, + // there is going to be several iterations. + Owns(&corev1.Secret{}). + // See : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + + // Ignore updates to CR status in which case metadata.Generation does not change, + // unless it is a change to the underlying Secret + UpdateFunc: func(e event.UpdateEvent) bool { + + // To check if the update event is tied to a change on secret, + // we try to cast e.ObjectNew to a secret (only if it's not a S3User, which + // should prevent any TypeAssertionError based panic). + secretUpdate := false + newUser, _ := e.ObjectNew.(*s3v1alpha1.S3User) + if newUser == nil { + secretUpdate = (e.ObjectNew.(*corev1.Secret) != nil) + } + + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() || secretUpdate + }, + // Ignore create events caused by the underlying secret's creation + CreateFunc: func(e event.CreateEvent) bool { + user, _ := e.Object.(*s3v1alpha1.S3User) + return user != nil + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} diff --git a/internal/controller/user/finalizer.go b/internal/controller/user/finalizer.go new file mode 100644 index 0000000..863859f --- /dev/null +++ b/internal/controller/user/finalizer.go @@ -0,0 +1,151 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user_controller + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *S3UserReconciler) finalizeS3User( + ctx context.Context, + userResource *s3v1alpha1.S3User, +) error { + logger := log.FromContext(ctx) + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + userResource.Name, + userResource.Namespace, + userResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return err + } + if s3Client.GetConfig().S3UserDeletionEnabled { + return s3Client.DeleteUser(userResource.Spec.AccessKey) + } + return nil +} + +func (r *S3UserReconciler) handleDeletion( + ctx context.Context, + req reconcile.Request, + userResource *s3v1alpha1.S3User, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + if controllerutil.ContainsFinalizer(userResource, userFinalizer) { + // Run finalization logic for S3UserFinalizer. If the finalization logic fails, don't remove the finalizer so that we can retry during the next reconciliation. + if err := r.finalizeS3User(ctx, userResource); err != nil { + logger.Error( + err, + "An error occurred when attempting to finalize the user", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.DeletionFailure, + "user deletion has failed", + err, + ) + } + + err := r.deleteOldLinkedSecret(ctx, userResource) + if err != nil { + logger.Error( + err, + "An error occurred when trying to clean old secret linked to user", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.DeletionFailure, + "Deletion of old secret associated to user have failed", + err, + ) + } + + userOwnedSecret, _ := r.getUserSecret(ctx, userResource) + if err := r.deleteSecret(ctx, &userOwnedSecret); err != nil { + logger.Error( + err, + "An error occurred when trying to clean secret linked to user", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.DeletionFailure, + "Deletion of secret associated to user have failed", + err, + ) + } + + //Remove userFinalizer. Once all finalizers have been removed, the object will be deleted. 
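+		// RemoveFinalizer returns false when the finalizer was not present on the object;
+		// in that case we requeue so the next reconciliation can re-evaluate the resource.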
+		if ok := controllerutil.RemoveFinalizer(userResource, userFinalizer); !ok {
+			logger.Info(
+				"Failed to remove finalizer for user resource",
+				"userResource",
+				userResource.Name,
+				"NamespacedName",
+				req.NamespacedName.String(),
+			)
+			return ctrl.Result{Requeue: true}, nil
+		}
+
+		// Unsure why the behavior is different from that of the bucket/policy/path controllers, but it appears
+		// calling r.Update() for adding/removal of finalizer is not necessary (an update event is generated
+		// with the call to AddFinalizer/RemoveFinalizer), and worse, causes a "freshness" problem (with the
+		// "the object has been modified; please apply your changes to the latest version and try again" error)
+		err = r.Update(ctx, userResource)
+		if err != nil {
+			logger.Error(
+				err,
+				"An error occurred when removing finalizer from user",
+				"userResource",
+				userResource.Name,
+				"NamespacedName",
+				req.NamespacedName.String(),
+			)
+			return ctrl.Result{}, err
+		}
+	}
+	return ctrl.Result{}, nil
+}
diff --git a/internal/controller/user/finalizer_test.go b/internal/controller/user/finalizer_test.go
new file mode 100644
index 0000000..9803fa4
--- /dev/null
+++ b/internal/controller/user/finalizer_test.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+package user_controller_test
+
+import (
+	"context"
+	"testing"
+
+	s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1"
+	user_controller "github.com/InseeFrLab/s3-operator/internal/controller/user"
+	TestUtils "github.com/InseeFrLab/s3-operator/test/utils"
+	"github.com/stretchr/testify/assert"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+)
+
+func TestHandleDelete(t *testing.T) {
+
+	// Set up a logger before running tests
+	log.SetLogger(zap.New(zap.UseDevMode(true)))
+
+	// Create a fake client with a sample CR
+	s3UserResource := &s3v1alpha1.S3User{
+		TypeMeta: metav1.TypeMeta{
+			APIVersion: "s3.onyxia.sh/v1alpha1",
+			Kind:       "S3User",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:       "existing-valid-user",
+			Namespace:  "default",
+			Generation: 1,
+			Finalizers: []string{"s3.onyxia.sh/userFinalizer"},
+			UID:        "6c8dceca-f7df-469d-80a5-1afed9e4d710",
+		},
+		Spec: s3v1alpha1.S3UserSpec{
+			AccessKey:                "existing-valid-user",
+			Policies:                 []string{"admin"},
+			SecretName:               "existing-valid-user-credentials",
+			S3InstanceRef:            "s3-operator/default",
+			SecretFieldNameAccessKey: "accessKey",
+			SecretFieldNameSecretKey: "secretKey",
+		},
+	}
+
+	blockOwnerDeletion := true
+	controller := true
+	s3UserSecretResource := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "existing-valid-user-credentials",
+			Namespace: "default",
+			OwnerReferences: []metav1.OwnerReference{
+				{
+					APIVersion:         s3UserResource.APIVersion,
+					Kind:               s3UserResource.Kind,
+					Name:               s3UserResource.Name,
+					BlockOwnerDeletion: &blockOwnerDeletion,
+					Controller:         &controller,
+					UID:                s3UserResource.UID,
+				},
+			},
+		},
+		Data: map[string][]byte{
+			"accessKey": []byte("existing-valid-user"),
+			"secretKey": []byte("validSecret"),
+		},
+	}
+
+	// Add mock for s3Factory and client
+	testUtils := TestUtils.NewTestUtils()
+	testUtils.SetupMockedS3FactoryAndClient()
+	s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret()
+	testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3UserResource, s3UserSecretResource})
+
+	// Create the reconciler
+	reconciler := &user_controller.S3UserReconciler{
+		Client:    testUtils.Client,
+		Scheme:    testUtils.Client.Scheme(),
+		S3factory: testUtils.S3Factory,
+	}
+	req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}}
+	reconciler.Reconcile(context.TODO(), req)
+	testUtils.Client.Delete(context.TODO(), s3UserResource)
+
+	t.Run("no error", func(t *testing.T) {
+		// Call Reconcile function
+		req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}}
+		_, err := reconciler.Reconcile(context.TODO(), req)
+		assert.NoError(t, err)
+	})
+
+	t.Run("resource has been deleted", func(t *testing.T) {
+		// Call Reconcile function
+		req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}}
+		reconciler.Reconcile(context.TODO(), req)
+		s3UserResource := &s3v1alpha1.S3User{}
+		err := testUtils.Client.Get(context.TODO(), client.ObjectKey{
+			Namespace: "default",
+			Name:      "existing-valid-user",
+		}, s3UserResource)
+		assert.NotNil(t, err)
+		assert.ErrorContains(t, err, "s3users.s3.onyxia.sh \"existing-valid-user\" not found")
+
+		s3UserSecret := &corev1.Secret{}
+		err = testUtils.Client.Get(context.TODO(), client.ObjectKey{
+			Namespace: "default",
+			Name:      "existing-valid-user-credentials",
+		}, s3UserSecret)
+		assert.NotNil(t, err)
+		assert.ErrorContains(t, err, "secrets \"existing-valid-user-credentials\" not found")
+
+	})
+
+}
diff --git a/internal/controller/user/reconcile.go b/internal/controller/user/reconcile.go
new file mode 100644
index 0000000..e125805
--- /dev/null
+++ b/internal/controller/user/reconcile.go
@@ -0,0 +1,882 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package user_controller
+
+import (
+	"context"
+	"fmt"
+	"slices"
+
+	corev1 "k8s.io/api/core/v1"
+	k8sapierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1"
+)
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
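+// For the S3User controller this means: fetch the CR, initialize its status, ensure the
+// finalizer is present, then branch into the deletion or creation/update handling below.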
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *S3UserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Checking for userResource existence + userResource := &s3v1alpha1.S3User{} + err := r.Get(ctx, req.NamespacedName, userResource) + if err != nil { + if k8sapierrors.IsNotFound(err) { + logger.Info( + fmt.Sprintf( + "The S3User CR %s (or its owned Secret) has been removed. NOOP", + req.Name, + ), + ) + return ctrl.Result{}, nil + } + logger.Error(err, "An error occurred when fetching the S3User from Kubernetes") + return ctrl.Result{}, err + } + + // Let's just set the status as Unknown when no status are available + if len(userResource.Status.Conditions) == 0 { + meta.SetStatusCondition( + &userResource.Status.Conditions, + metav1.Condition{ + Type: s3v1alpha1.ConditionReconciled, + Status: metav1.ConditionUnknown, + ObservedGeneration: userResource.Generation, + Reason: s3v1alpha1.Reconciling, + Message: "Starting reconciliation", + }, + ) + if err = r.Status().Update(ctx, userResource); err != nil { + logger.Error( + err, + "Failed to update userRessource status", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the userResource Custom Resource after update the status + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, userResource); err != nil { + logger.Error( + err, + "Failed to re-fetch userResource", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(userResource, userFinalizer) { + logger.Info("Adding finalizer to user resource", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + + if ok := controllerutil.AddFinalizer(userResource, userFinalizer); !ok { + logger.Error( + err, + "Failed to add finalizer into user resource", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + if err = r.Update(ctx, userResource); err != nil { + logger.Error( + err, + "An error occurred when adding finalizer from user", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the userResource Custom Resource after adding the finalizer + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, userResource); err != nil { + logger.Error( + err, + "Failed to re-fetch userResource", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Check if the userResource instance is 
marked to be deleted, which is + // indicated by the deletion timestamp being set. The object will be deleted. + if userResource.GetDeletionTimestamp() != nil { + logger.Info("userResource have been marked for deletion", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.handleDeletion(ctx, req, userResource) + } + + return r.handleReconciliation(ctx, req, userResource) + +} + +func (r *S3UserReconciler) handleReconciliation( + ctx context.Context, + req reconcile.Request, + userResource *s3v1alpha1.S3User, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + userResource.Name, + userResource.Namespace, + userResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + found, err := s3Client.UserExist(userResource.Spec.AccessKey) + if err != nil { + logger.Error( + err, + "An error occurred while checking the existence of a user", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Fail to check existence", + err, + ) + } + + if !found { + return r.handleCreate(ctx, req, userResource) + } + return r.handleUpdate(ctx, req, userResource) +} + +func (r *S3UserReconciler) handleUpdate( + ctx context.Context, + req reconcile.Request, + userResource *s3v1alpha1.S3User, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + // Create S3Client + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + userResource.Name, + userResource.Namespace, + userResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + err = r.deleteOldLinkedSecret(ctx, userResource) + if err != nil { + logger.Error( + err, + "An error occurred when trying to clean old secret linked to user", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Deletion of old secret associated to user have failed", + err, + ) + } + + userOwnedSecret, err := r.getUserSecret(ctx, userResource) + if err != nil { + if err.Error() == "SecretListingFailed" { + logger.Error( + err, + "An error occurred when trying to obtain the user's secret", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + + err = r.deleteSecret(ctx, &userOwnedSecret) + if err != nil { + logger.Error( + err, + "Deletion of secret associated to user have failed", + "userResource", + userResource.Name, + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Deletion of secret associated to user have failed", + err, + ) + + } + err = s3Client.DeleteUser(userResource.Spec.AccessKey) + if err != nil { + logger.Error(err, "Could not delete user on S3 server", "userResource", + userResource.Name, + 
"userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "Deletion of S3user %s on S3 server has failed", + userResource.Name, + ), + err, + ) + + } + return r.handleCreate(ctx, req, userResource) + } else if err.Error() == "S3UserSecretNameMismatch" { + logger.Info("A secret with owner reference to the user was found, but its name doesn't match the spec. This is probably due to the S3User's spec changing (specifically spec.secretName being added, changed or removed). The \"old\" secret will be deleted.", "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + err = r.deleteSecret(ctx, &userOwnedSecret) + if err != nil { + logger.Error(err, "Deletion of secret associated to user have failed", "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Deletion of secret associated to user have failed", + err, + ) + + } + } + } + + if userOwnedSecret.Name == "" { + logger.Info( + "Secret associated to user not found, user will be deleted from the S3 backend, then recreated with a secret", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + + err = s3Client.DeleteUser(userResource.Spec.AccessKey) + if err != nil { + logger.Error(err, "Could not delete user on S3 server", "userResource", + userResource.Name, + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "Deletion of S3user %s on S3 server has failed", + userResource.Name, + ), + err, + ) + } + return r.handleCreate(ctx, req, userResource) + } + + logger.Info("Checking user policies", "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + userPolicies, err := s3Client.GetUserPolicies(userResource.Spec.AccessKey) + if err != nil { + logger.Error(err, "Could not check the user's policies", "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Checking the S3user policies has failed", + err, + ) + } + + policyToDelete := []string{} + policyToAdd := []string{} + for _, policy := range userPolicies { + policyFound := slices.Contains(userResource.Spec.Policies, policy) + if !policyFound { + logger.Info( + fmt.Sprintf("S3User policy definition doesn't contain policy %s", policy), + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + policyToDelete = append(policyToDelete, policy) + } + } + + for _, policy := range userResource.Spec.Policies { + policyFound := slices.Contains(userPolicies, policy) + if !policyFound { + logger.Info( + fmt.Sprintf("S3User policy definition must contain policy %s", policy), + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + policyToAdd = append(policyToAdd, policy) + } + } + + if len(policyToDelete) > 0 { + err = s3Client.RemovePoliciesFromUser(userResource.Spec.AccessKey, policyToDelete) + if err != nil { + logger.Error( + err, + "An error occurred while removing policy to user", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + 
return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Error while updating policies of user", + err, + ) + } + } + + if len(policyToAdd) > 0 { + err := s3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policyToAdd) + if err != nil { + logger.Error( + err, + "An error occurred while adding policy to user", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Error while updating policies of user", + err, + ) + + } + } + + credentialsValid, err := s3Client.CheckUserCredentialsValid( + userResource.Name, + string(userOwnedSecret.Data[userResource.Spec.SecretFieldNameAccessKey]), + string(userOwnedSecret.Data[userResource.Spec.SecretFieldNameSecretKey]), + ) + + if err != nil { + logger.Error( + err, + "An error occurred when checking if user credentials were valid", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Checking credentials on S3 server has failed", + err, + ) + } + + if !credentialsValid { + logger.Info( + "The secret containing the credentials will be deleted, and the user will be deleted from the S3 backend, then recreated (through another reconcile)", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + err = r.deleteSecret(ctx, &userOwnedSecret) + if err != nil { + logger.Error(err, "Deletion of secret associated to user have failed", "userResource", + userResource.Name, + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Deletion of secret associated to user have failed", + err, + ) + + } + err = s3Client.DeleteUser(userResource.Spec.AccessKey) + if err != nil { + logger.Error(err, "Could not delete user on S3 server", "userResource", + userResource.Name, + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "Deletion of S3user %s on S3 server has failed", + userResource.Name, + ), + err, + ) + + } + return r.handleCreate(ctx, req, userResource) + } + + logger.Info("User was reconciled without error", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Reconciled, + "user reconciled", + err, + ) +} + +func (r *S3UserReconciler) handleCreate( + ctx context.Context, + req reconcile.Request, + userResource *s3v1alpha1.S3User, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + // Create S3Client + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + userResource.Name, + userResource.Namespace, + userResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // Generating a random secret key + secretKey, err := r.PasswordGeneratorHelper.Generate(20, true, false, true) + if err != nil { + + logger.Error(err, fmt.Sprintf("Fail to generate password for user 
%s", userResource.Name), + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "An error occurred when attempting to generate password for user", + err, + ) + } + + // Create a new K8S Secret to hold the user's accessKey and secretKey + secret, err := r.newSecretForCR( + ctx, + userResource, + map[string][]byte{ + userResource.Spec.SecretFieldNameAccessKey: []byte(userResource.Spec.AccessKey), + userResource.Spec.SecretFieldNameSecretKey: []byte(secretKey)}, + ) + if err != nil { + // Error while creating the Kubernetes secret - requeue the request. + logger.Error(err, "Could not generate Kubernetes secret", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Generation of associated k8s Secret has failed", + err, + ) + } + + // For managing user creation, we first check if a Secret matching + // the user's spec (not matching the owner reference) exists + existingK8sSecret := &corev1.Secret{} + err = r.Get( + ctx, + types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}, + existingK8sSecret, + ) + + // If none exist : we create the user, then the secret + if err != nil && k8sapierrors.IsNotFound(err) { + logger.Info( + "No secret found ; creating a new Secret", + "Secret.Namespace", + secret.Namespace, + "Secret.Name", + secret.Name, + ) + + // Creating the user + err = s3Client.CreateUser(userResource.Spec.AccessKey, secretKey) + + if err != nil { + logger.Error( + err, + "An error occurred while creating user on S3 server", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Creation of user on S3 instance has failed", + err, + ) + } + + // Creating the secret + logger.Info( + "Creating a new secret to store the user's credentials", + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + err = r.Create(ctx, secret) + if err != nil { + logger.Error(err, "Could not create secret for user", + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Creation of secret for user has failed", + err, + ) + } + + // Add policies + err = r.addPoliciesToUser(ctx, userResource) + if err != nil { + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Error while updating policies of user on S3 instance", + err, + ) + } + + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Reconciled, + "User reconciled", + err, + ) + + } else if err != nil { + logger.Error(err, "Couldn't check secret existence", + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Fail to check if an existing secret already exist", + err, + ) + } else { + // If a secret already exists, but has a different S3User owner reference, then the creation should + // fail with no requeue, and use the status to inform that the spec should be changed + for _, ref := range 
existingK8sSecret.OwnerReferences { + if ref.Kind == "S3User" { + if ref.UID != userResource.UID { + logger.Error(fmt.Errorf(""), "The secret matching the new S3User's spec is owned by a different S3User.", + "conflictingUser", + ref.Name, + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.CreationFailure, + fmt.Sprintf("The secret matching the new S3User's spec is owned by a different, pre-existing S3User (%s). The S3User being created now (%s) won't be created on the S3 backend until its spec changes to target a different secret", ref.Name, userResource.Name), + err, + ) + } + } + } + + if r.OverrideExistingSecret { + // Case 3.2 : they are not valid, but the operator is configured to overwrite it + logger.Info(fmt.Sprintf("A secret with the name %s already exists ; it will be overwritten because of operator configuration", secret.Name), "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + + // Creating the user + err = s3Client.CreateUser(userResource.Spec.AccessKey, secretKey) + if err != nil { + logger.Error( + err, + "An error occurred while creating user on S3 server", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Creation of user on S3 instance has failed", + err, + ) + } + + // Updating the secret + logger.Info("Updating the pre-existing secret with new credentials", + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + err = r.Update(ctx, secret) + if err != nil { + logger.Error(err, "Could not update secret", "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Update of secret have failed", + err, + ) + } + + // Add policies + err = r.addPoliciesToUser(ctx, userResource) + if err != nil { + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Error while updating associated policy", + err, + ) + } + + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Reconciled, + "User Reconciled", + err, + ) + } + + // Case 3.3 : they are not valid, and the operator is configured keep the existing secret + // The user will not be created, with no requeue and with two possible ways out : either toggle + // OverrideExistingSecret on, or delete the S3User whose credentials are not working anyway. + logger.Error(fmt.Errorf(""), + "A secret with the same name already exists ; as the operator is configured to NOT override any pre-existing secrets, this user will not be created on S3 backend until spec change (to target new secret), or until the operator configuration is changed to override existing secrets", + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.CreationFailure, + "Creation of user on S3 instance has failed necause secret contains invalid credentials. 
The user's spec should be changed to target a different secret", + err, + ) + } +} diff --git a/internal/controller/user/reconcile_test.go b/internal/controller/user/reconcile_test.go new file mode 100644 index 0000000..9ba3c83 --- /dev/null +++ b/internal/controller/user/reconcile_test.go @@ -0,0 +1,302 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user_controller_test + +import ( + "context" + "testing" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + user_controller "github.com/InseeFrLab/s3-operator/internal/controller/user" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleCreate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + s3UserResource := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-user", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + AccessKey: "example-user", + SecretName: "example-user-secret", + Policies: []string{"admin"}, + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + + // Create a fake client with a sample CR + s3UserUsingNotAllowedS3Instance := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-user", + Namespace: "unauthorized", + Generation: 1, + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + AccessKey: "example-user", + SecretName: "example-user-secret", + Policies: []string{"admin"}, + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3UserResource, s3UserUsingNotAllowedS3Instance}) + + // Create the reconciler + reconciler := &user_controller.S3UserReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("error if using invalidS3Instance", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserUsingNotAllowedS3Instance.Name, Namespace: 
s3UserUsingNotAllowedS3Instance.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NotNil(t, err) + }) + + t.Run("secret is created", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + + secretCreated := &corev1.Secret{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "example-user-secret", + }, secretCreated) + assert.NoError(t, err) + assert.Equal(t, "example-user", string(secretCreated.Data["accessKey"])) + assert.GreaterOrEqual(t, len(string(secretCreated.Data["secretKey"])), 20) + + }) +} + +func TestHandleUpdate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + t.Run("valid user", func(t *testing.T) { + // Create a fake client with a sample CR + s3UserResource := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/userFinalizer"}, + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + AccessKey: "existing-valid-user", + SecretName: "existing-valid-user-secret", + Policies: []string{"admin"}, + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + + secretS3UserResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accessKey": []byte("existing-valid-user"), + "secretKey": []byte("validSecret"), + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3UserResource, secretS3UserResource}) + + // Create the reconciler + reconciler := &user_controller.S3UserReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + }) + + t.Run("invalid user password", func(t *testing.T) { + // Create a fake client with a sample CR + s3UserResource := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/userFinalizer"}, + UID: "6c8dceca-f7df-469d-80a5-1afed9e4d710", + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + AccessKey: "existing-valid-user", + SecretName: "existing-valid-user-secret", + Policies: []string{"admin"}, + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + blockOwnerDeletion := true + controller := true + secretS3UserResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user-secret", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: s3UserResource.APIVersion, + Kind: s3UserResource.Kind, + Name: s3UserResource.Name, + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &controller, + UID: s3UserResource.UID, + }, + 
}, + }, + Data: map[string][]byte{ + "accessKey": []byte("existing-valid-user"), + "secretKey": []byte("invalidSecret"), + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3UserResource, secretS3UserResource}) + + // Create the reconciler + reconciler := &user_controller.S3UserReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("secret have changed", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + + secretCreated := &corev1.Secret{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "existing-valid-user-secret", + }, secretCreated) + assert.NoError(t, err) + assert.Equal(t, "existing-valid-user", string(secretCreated.Data["accessKey"])) + assert.NotEqualValues(t, string(secretCreated.Data["secretKey"]), "invalidSecret") + }) + }) + + t.Run("invalid user policy", func(t *testing.T) { + // Create a fake client with a sample CR + s3UserResource := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/userFinalizer"}, + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + AccessKey: "existing-valid-user", + SecretName: "existing-valid-user", + Policies: []string{"admin", "missing-policy"}, + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + + secretS3UserResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accessKey": []byte("existing-valid-user"), + "secretKey": []byte("validSecret"), + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3UserResource, secretS3UserResource}) + + // Create the reconciler + reconciler := &user_controller.S3UserReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + }) + +} diff --git a/internal/controller/user/status.go b/internal/controller/user/status.go new file mode 100644 index 0000000..990a159 --- /dev/null +++ b/internal/controller/user/status.go @@ -0,0 +1,47 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user_controller + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *S3UserReconciler) SetReconciledCondition( + ctx context.Context, + req ctrl.Request, + userResource *s3v1alpha1.S3User, + reason string, + message string, + err error, +) (ctrl.Result, error) { + return r.ControllerHelper.SetReconciledCondition( + ctx, + r.Status(), + req, + userResource, + &userResource.Status.Conditions, + s3v1alpha1.ConditionReconciled, + reason, + message, + err, + r.ReconcilePeriod, + ) +} diff --git a/internal/controller/user/utils.go b/internal/controller/user/utils.go new file mode 100644 index 0000000..3968263 --- /dev/null +++ b/internal/controller/user/utils.go @@ -0,0 +1,186 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package user_controller + +import ( + "cmp" + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *S3UserReconciler) addPoliciesToUser( + ctx context.Context, + userResource *s3v1alpha1.S3User, +) error { + logger := log.FromContext(ctx) + // Create S3Client + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + userResource.Name, + userResource.Namespace, + userResource.Spec.S3InstanceRef, + ) + if err != nil { + return err + } + policies := userResource.Spec.Policies + if policies != nil { + err := s3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policies) + if err != nil { + logger.Error( + err, + "An error occurred while adding policy to user", + "user", + userResource.Name, + ) + return err + } + } + return nil +} + +func (r *S3UserReconciler) deleteOldLinkedSecret(ctx context.Context, userResource *s3v1alpha1.S3User) error { + logger := log.FromContext(ctx) + secretsList := &corev1.SecretList{} + + // Define options with label selector and namespace + listOpts := []client.ListOption{ + client.InNamespace(userResource.Namespace), // Filter by namespace + client.MatchingLabels{"app.kubernetes.io/created-by": "s3-operator"}, // Filter by label + } + + // List Secrets with the specified label in the given namespace + if err := r.List(ctx, secretsList, listOpts...); err != nil { + return fmt.Errorf("failed to list secrets in namespace %s: %w", userResource.Namespace, err) + } + + for _, secret := range secretsList.Items { + for _, ref := range secret.OwnerReferences { + if ref.UID == userResource.GetUID() { + if (userResource.Spec.SecretName != "" && secret.Name != userResource.Spec.SecretName) || (userResource.Spec.SecretName == "" && secret.Name != userResource.Name) { + if err := r.deleteSecret(ctx, &secret); err != nil { + logger.Info("Failed to delete unused secret", "secret", secret.Name) + return fmt.Errorf("failed to delete unused secret %s, err %w", secret.Name, err) + } + } + } + } + } + + return nil +} + +func (r *S3UserReconciler) getUserSecret( + ctx context.Context, + userResource *s3v1alpha1.S3User, +) (corev1.Secret, error) { + userSecret := &corev1.Secret{} + secretName := userResource.Spec.SecretName + if secretName == "" { + secretName = userResource.Name + } + err := r.Get( + ctx, + types.NamespacedName{Namespace: userResource.Namespace, Name: secretName}, + userSecret, + ) + if err != nil { + if k8sapierrors.IsNotFound(err) { + return *userSecret, fmt.Errorf( + "secret %s not found in namespace %s", + secretName, + userResource.Namespace, + ) + } + return *userSecret, err + } + + for _, ref := range userSecret.OwnerReferences { + if ref.UID == userResource.GetUID() { + return *userSecret, nil + } + } + + return *userSecret, err +} + +func (r *S3UserReconciler) deleteSecret(ctx context.Context, secret *corev1.Secret) error { + logger := log.FromContext(ctx) + logger.Info("the secret named " + secret.Name + " will be deleted") + err := r.Delete(ctx, secret) + if err != nil { + logger.Error(err, "An error occurred while deleting a secret") + return err + } + return nil +} + +// newSecretForCR returns a secret with the same name/namespace as the CR. 
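+// When spec.secretName is set, that name is used for the Secret instead of the CR name.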
+// The secret will include all labels and annotations from the CR. +func (r *S3UserReconciler) newSecretForCR( + ctx context.Context, + userResource *s3v1alpha1.S3User, + data map[string][]byte, +) (*corev1.Secret, error) { + logger := log.FromContext(ctx) + + // Reusing the S3User's labels and annotations + labels := map[string]string{} + labels["app.kubernetes.io/created-by"] = "s3-operator" + for k, v := range userResource.ObjectMeta.Labels { + labels[k] = v + } + + annotations := map[string]string{} + for k, v := range userResource.ObjectMeta.Annotations { + annotations[k] = v + } + + secretName := cmp.Or(userResource.Spec.SecretName, userResource.Name) + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: userResource.Namespace, + Labels: labels, + Annotations: annotations, + }, + Data: data, + Type: "Opaque", + } + + // Set S3User instance as the owner and controller + err := ctrl.SetControllerReference(userResource, secret, r.Scheme) + if err != nil { + logger.Error(err, "Could not set owner of kubernetes secret") + return nil, err + } + + return secret, nil + +} diff --git a/internal/helpers/S3instance_test.go b/internal/helpers/S3instance_test.go new file mode 100644 index 0000000..678416a --- /dev/null +++ b/internal/helpers/S3instance_test.go @@ -0,0 +1,228 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package helpers_test + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + helpers "github.com/InseeFrLab/s3-operator/internal/helpers" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" +) + +func TestGetS3ClientForRessource(t *testing.T) { + + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Register the custom resource with the scheme sch := runtime.NewScheme() + s := scheme.Scheme + s3v1alpha1.AddToScheme(s) + corev1.AddToScheme(s) + + s3Instance := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + }, + Status: s3v1alpha1.S3InstanceStatus{Conditions: []metav1.Condition{{Reason: s3v1alpha1.Reconciled}}}, + } + + s3Instance_not_ready := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "not-ready", + Namespace: "s3-operator", + }, + Status: s3v1alpha1.S3InstanceStatus{Conditions: []metav1.Condition{{Reason: s3v1alpha1.CreationFailure}}}, + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + client := fake.NewClientBuilder(). + WithScheme(s). + WithObjects(s3Instance, s3Instance_not_ready, secret). + WithStatusSubresource(s3Instance, s3Instance_not_ready, secret). 
+ Build() + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + + t.Run("no error", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + _, err := s3instanceHelper.GetS3ClientForRessource(context.TODO(), client, testUtils.S3Factory, "bucket-example", "default", "s3-operator/default") + assert.NoError(t, err) + }) + + t.Run("error because instance not ready", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + _, err := s3instanceHelper.GetS3ClientForRessource(context.TODO(), client, testUtils.S3Factory, "bucket-example", "default", "s3-operator/not-ready") + assert.Equal(t, "S3instance is not in a ready state", err.Error()) + }) +} + +func TestGetS3ClientFromS3instance(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Register the custom resource with the scheme sch := runtime.NewScheme() + s := scheme.Scheme + s3v1alpha1.AddToScheme(s) + corev1.AddToScheme(s) + + s3Instance := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + }, + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + client := fake.NewClientBuilder(). + WithScheme(s). + WithObjects(s3Instance, secret). + WithStatusSubresource(s3Instance, secret). 
+ Build() + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + + t.Run("no error", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + _, err := s3instanceHelper.GetS3ClientFromS3Instance(context.TODO(), client, testUtils.S3Factory, s3Instance) + assert.NoError(t, err) + }) +} + +func TestGetS3InstanceRefInfo(t *testing.T) { + t.Run("Exact match", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.GetS3InstanceRefInfo("s3-operator/default", "default") + assert.Equal(t, true, result.Equal("default", "s3-operator")) + }) + + t.Run("Exact match", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.GetS3InstanceRefInfo("default", "default") + assert.Equal(t, true, result.Equal("default", "default")) + }) +} + +func TestIsAllowedNamespaces(t *testing.T) { + s3Instance := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + }, + } + + t.Run("Exact match", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.IsAllowedNamespaces("default", s3Instance) + assert.Equal(t, true, result) + }) + + t.Run("Wildcard prefix", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.IsAllowedNamespaces("test-namespace", s3Instance) + assert.Equal(t, true, result) + }) + + t.Run("Wildcard suffix", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.IsAllowedNamespaces("my-namespace", s3Instance) + assert.Equal(t, true, result) + }) + + t.Run("Wildcard contains", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.IsAllowedNamespaces("this-is-allowed-namespace", s3Instance) + assert.Equal(t, true, result) + }) + + t.Run("Not allowed", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.IsAllowedNamespaces("random", s3Instance) + assert.Equal(t, false, result) + }) +} diff --git a/internal/helpers/controller.go b/internal/helpers/controller.go new file mode 100644 index 0000000..a243ada --- /dev/null +++ b/internal/helpers/controller.go @@ -0,0 +1,90 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package helpers + +import ( + "context" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/api/meta" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type ControllerHelper struct { +} + +func NewControllerHelper() *ControllerHelper { + return &ControllerHelper{} +} + +// SetReconciledCondition is a generic helper to update the reconciled condition for any Kubernetes resource. +func (c *ControllerHelper) SetReconciledCondition( + ctx context.Context, + statusWriter client.StatusWriter, // Allows updating status for any reconciler + req reconcile.Request, + resource client.Object, // Accepts any Kubernetes object with conditions + conditions *[]metav1.Condition, // Conditions field reference (must be a pointer) + conditionType string, // The type of condition to set + reason string, + message string, + err error, + requeueAfter time.Duration, // Requeue period for reconciliation +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + var changed bool + + if err != nil { + logger.Error(err, message, "NamespacedName", req.NamespacedName.String()) + changed = meta.SetStatusCondition( + conditions, + metav1.Condition{ + Type: conditionType, + Status: metav1.ConditionFalse, + ObservedGeneration: resource.GetGeneration(), + Reason: reason, + Message: fmt.Sprintf("%s: %s", message, err), + }, + ) + } else { + logger.Info(message, "NamespacedName", req.NamespacedName.String()) + changed = meta.SetStatusCondition( + conditions, + metav1.Condition{ + Type: conditionType, + Status: metav1.ConditionTrue, + ObservedGeneration: resource.GetGeneration(), + Reason: reason, + Message: message, + }, + ) + } + + if changed { + if errStatusUpdate := statusWriter.Update(ctx, resource); errStatusUpdate != nil { + logger.Error(errStatusUpdate, "Failed to update resource status", "ObjectKind", resource.GetObjectKind(), "NamespacedName", req.NamespacedName.String()) + return reconcile.Result{}, errStatusUpdate + } + } + + return reconcile.Result{RequeueAfter: requeueAfter}, err +} diff --git a/internal/helpers/controller_test.go b/internal/helpers/controller_test.go new file mode 100644 index 0000000..a71c4db --- /dev/null +++ b/internal/helpers/controller_test.go @@ -0,0 +1,119 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package helpers_test
+
+import (
+ "context"
+ "fmt"
+ "testing"
+ "time"
+
+ ctrl "sigs.k8s.io/controller-runtime"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+ s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1"
+ "github.com/InseeFrLab/s3-operator/internal/helpers"
+ testUtils "github.com/InseeFrLab/s3-operator/test/utils"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestSetReconciledCondition(t *testing.T) {
+
+ log.SetLogger(zap.New(zap.UseDevMode(true)))
+
+ s3instanceResource := &s3v1alpha1.S3Instance{
+ Spec: s3v1alpha1.S3InstanceSpec{
+ AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"},
+ Url: "https://minio.example.com",
+ S3Provider: "minio",
+ Region: "us-east-1",
+ BucketDeletionEnabled: true,
+ S3UserDeletionEnabled: true,
+ PathDeletionEnabled: true,
+ PolicyDeletionEnabled: true,
+ SecretRef: "minio-credentials",
+ },
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "default",
+ Namespace: "s3-operator",
+ Finalizers: []string{"s3.onyxia.sh/finalizer"},
+ DeletionTimestamp: &metav1.Time{Time: time.Now()},
+ },
+ }
+ testUtils := testUtils.NewTestUtils()
+ testUtils.SetupClient([]client.Object{s3instanceResource})
+ controllerHelper := helpers.NewControllerHelper()
+
+ t.Run("no error", func(t *testing.T) {
+ _, err := controllerHelper.SetReconciledCondition(
+ context.TODO(),
+ testUtils.Client.Status(),
+ ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}},
+ s3instanceResource,
+ &s3instanceResource.Status.Conditions,
+ s3v1alpha1.Reconciled,
+ "s3Instance reconciled",
+ "s3Instance reconciled",
+ nil, time.Duration(10),
+ )
+ assert.NoError(t, err)
+ })
+
+ t.Run("resource status has changed", func(t *testing.T) {
+ s3instanceResourceUpdated := &s3v1alpha1.S3Instance{}
+ err := testUtils.Client.Get(context.TODO(), client.ObjectKey{
+ Namespace: "s3-operator",
+ Name: "default",
+ }, s3instanceResourceUpdated)
+ assert.NoError(t, err)
+ assert.Equal(t, s3v1alpha1.Reconciled, s3instanceResourceUpdated.Status.Conditions[0].Type)
+ assert.Equal(t, "s3Instance reconciled", s3instanceResourceUpdated.Status.Conditions[0].Message)
+ })
+
+ t.Run("with error", func(t *testing.T) {
+ _, err := controllerHelper.SetReconciledCondition(
+ context.TODO(),
+ testUtils.Client.Status(),
+ ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}},
+ s3instanceResource,
+ &s3instanceResource.Status.Conditions,
+ s3v1alpha1.CreationFailure,
+ "s3Instance reconciled",
+ "s3Instance reconciled",
+ fmt.Errorf("something went wrong"), time.Duration(10),
+ )
+
+ assert.NotNil(t, err)
+
+ })
+
+ t.Run("resource status reflects the error", func(t *testing.T) {
+ s3instanceResourceUpdated := &s3v1alpha1.S3Instance{}
+ err := testUtils.Client.Get(context.TODO(), client.ObjectKey{
+ Namespace: "s3-operator",
+ Name: "default",
+ }, s3instanceResourceUpdated)
+ assert.NoError(t, err)
+ assert.Equal(t, s3v1alpha1.CreationFailure, s3instanceResourceUpdated.Status.Conditions[1].Type)
+ assert.Contains(t, s3instanceResourceUpdated.Status.Conditions[1].Message, "something went wrong")
+ })
+}
diff --git a/controllers/utils/password/password_generator.go b/internal/helpers/password_generator.go similarity index 78% rename from
controllers/utils/password/password_generator.go rename to internal/helpers/password_generator.go index 884ccef..b46adcc 100644 --- a/controllers/utils/password/password_generator.go +++ b/internal/helpers/password_generator.go @@ -1,4 +1,20 @@ -package password +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers import ( "crypto/rand" @@ -7,8 +23,11 @@ import ( "strings" ) -type PasswordGenerator interface { - Generate(int, int, int, bool, bool) (string, error) +type PasswordGenerator struct { +} + +func NewPasswordGenerator() *PasswordGenerator { + return &PasswordGenerator{} } const ( @@ -26,7 +45,7 @@ const ( ) // func GeneratePassword(length int, useLetters bool, useSpecial bool, useNum bool) string { -func Generate(length int, useLetters bool, useSpecial bool, useNum bool) (string, error) { +func (p *PasswordGenerator) Generate(length int, useLetters bool, useSpecial bool, useNum bool) (string, error) { gen, err := NewGenerator(nil) if err != nil { return "", err @@ -42,7 +61,14 @@ func Generate(length int, useLetters bool, useSpecial bool, useNum bool) (string // // The algorithm is fast, but it's not designed to be performant; it favors // entropy over speed. This function is safe for concurrent use. -func (g *Generator) Generate(length int, useDigit bool, useSymbol bool, useUpper bool, useLower bool, allowRepeat bool) (string, error) { +func (g *Generator) Generate( + length int, + useDigit bool, + useSymbol bool, + useUpper bool, + useLower bool, + allowRepeat bool, +) (string, error) { choices := "" if useDigit { diff --git a/internal/helpers/password_generator_test.go b/internal/helpers/password_generator_test.go new file mode 100644 index 0000000..4e27592 --- /dev/null +++ b/internal/helpers/password_generator_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers_test + +import ( + "testing" + + helpers "github.com/InseeFrLab/s3-operator/internal/helpers" + "github.com/stretchr/testify/assert" +) + +func TestGenerate(t *testing.T) { + t.Run("Exact match", func(t *testing.T) { + passwordGenerator := helpers.NewPasswordGenerator() + password, _ := passwordGenerator.Generate(20, true, true, true) + assert.Len(t, password, 20) + }) +} diff --git a/internal/helpers/s3instance.go b/internal/helpers/s3instance.go new file mode 100644 index 0000000..31e83c1 --- /dev/null +++ b/internal/helpers/s3instance.go @@ -0,0 +1,251 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers + +import ( + "context" + "fmt" + "strings" + + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" + corev1 "k8s.io/api/core/v1" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +func (s3InstanceHelper *S3InstanceHelper) GetS3ClientForRessource( + ctx context.Context, + client client.Client, + s3factory s3factory.S3Factory, + ressourceName string, + ressourceNamespace string, + ressourceS3InstanceRef string, +) (s3client.S3Client, error) { + logger := log.FromContext(ctx) + logger.Info(fmt.Sprintf("Resource refer to s3Instance %s", ressourceS3InstanceRef)) + s3InstanceInfo := s3InstanceHelper.GetS3InstanceRefInfo(ressourceS3InstanceRef, ressourceNamespace) + s3Instance := &s3v1alpha1.S3Instance{} + err := client.Get( + ctx, + types.NamespacedName{Namespace: s3InstanceInfo.namespace, Name: s3InstanceInfo.name}, + s3Instance, + ) + + if err != nil { + if k8sapierrors.IsNotFound(err) { + return nil, fmt.Errorf("S3Instance %s not found", s3InstanceInfo.name) + } + return nil, err + } + + if !s3InstanceHelper.IsAllowedNamespaces(ressourceNamespace, s3Instance) { + logger.Info( + fmt.Sprintf( + "Resource %s try to use s3instance %s in namespace %s but is not allowed", + ressourceName, + s3InstanceInfo.name, + s3InstanceInfo.namespace, + ), + ) + return nil, fmt.Errorf("S3Instance %s not found", s3InstanceInfo.name) + } + + if s3Instance.Status.Conditions[0].Reason != s3v1alpha1.Reconciled { + return nil, fmt.Errorf("S3instance is not in a ready state") + } + + return s3InstanceHelper.GetS3ClientFromS3Instance(ctx, client, s3factory, s3Instance) +} + +func (s3InstanceHelper *S3InstanceHelper) GetS3ClientFromS3Instance( + ctx context.Context, + client client.Client, + s3factory s3factory.S3Factory, + s3InstanceResource *s3v1alpha1.S3Instance, +) (s3client.S3Client, error) { + logger := log.FromContext(ctx) + + s3InstanceSecretSecret, err := s3InstanceHelper.getS3InstanceAccessSecret(ctx, client, s3InstanceResource) + if err != nil { + logger.Error( + err, + "Could not get s3Instance auth secret in namespace", + "s3InstanceSecretRefName", + s3InstanceResource.Spec.SecretRef, + "NamespacedName", + s3InstanceResource.Namespace, + ) + return nil, err + } + + s3InstanceCaCertSecret, err := s3InstanceHelper.getS3InstanceCaCertSecret(ctx, client, s3InstanceResource) + if err != nil { + logger.Error( + err, + "Could not get s3Instance cert secret in namespace", + "s3InstanceSecretRefName", + s3InstanceResource.Spec.SecretRef, + "NamespacedName", + s3InstanceResource.Namespace, + ) + return nil, err + } + + allowedNamepaces := []string{s3InstanceResource.Namespace} + if len(s3InstanceResource.Spec.AllowedNamespaces) > 0 { + allowedNamepaces = s3InstanceResource.Spec.AllowedNamespaces + } + 
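+ // Build the provider-agnostic S3Config handed to the S3 factory. Credentials
+ // are read from the Secret referenced by Spec.SecretRef (keys "S3_ACCESS_KEY"
+ // and "S3_SECRET_KEY"), and the optional CA bundle from the Secret referenced
+ // by Spec.CaCertSecretRef (key "ca.crt"). A minimal credentials Secret could
+ // look like the following sketch (names and values are illustrative only):
+ //
+ //	apiVersion: v1
+ //	kind: Secret
+ //	metadata:
+ //	  name: minio-credentials
+ //	  namespace: s3-operator
+ //	stringData:
+ //	  S3_ACCESS_KEY: access_key_value
+ //	  S3_SECRET_KEY: secret_key_value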
+ s3Config := &s3client.S3Config{
+ S3Provider: s3InstanceResource.Spec.S3Provider,
+ AccessKey: string(s3InstanceSecretSecret.Data["S3_ACCESS_KEY"]),
+ SecretKey: string(s3InstanceSecretSecret.Data["S3_SECRET_KEY"]),
+ S3Url: s3InstanceResource.Spec.Url,
+ Region: s3InstanceResource.Spec.Region,
+ AllowedNamespaces: allowedNamepaces,
+ CaCertificatesBase64: []string{string(s3InstanceCaCertSecret.Data["ca.crt"])},
+ BucketDeletionEnabled: s3InstanceResource.Spec.BucketDeletionEnabled,
+ S3UserDeletionEnabled: s3InstanceResource.Spec.S3UserDeletionEnabled,
+ PolicyDeletionEnabled: s3InstanceResource.Spec.PolicyDeletionEnabled,
+ PathDeletionEnabled: s3InstanceResource.Spec.PathDeletionEnabled,
+ }
+
+ return s3factory.GenerateS3Client(s3Config.S3Provider, s3Config)
+}
+
+func (s3InstanceHelper *S3InstanceHelper) getS3InstanceAccessSecret(
+ ctx context.Context,
+ client client.Client,
+ s3InstanceResource *s3v1alpha1.S3Instance,
+) (corev1.Secret, error) {
+ s3InstanceSecret := &corev1.Secret{}
+ err := client.Get(
+ ctx,
+ types.NamespacedName{
+ Namespace: s3InstanceResource.Namespace,
+ Name: s3InstanceResource.Spec.SecretRef,
+ },
+ s3InstanceSecret,
+ )
+ if err != nil {
+ if k8sapierrors.IsNotFound(err) {
+ return *s3InstanceSecret, fmt.Errorf(
+ "secret %s not found in namespace %s",
+ s3InstanceResource.Spec.SecretRef,
+ s3InstanceResource.Namespace,
+ )
+ }
+ return *s3InstanceSecret, err
+ }
+ return *s3InstanceSecret, nil
+}
+
+func (s3InstanceHelper *S3InstanceHelper) getS3InstanceCaCertSecret(
+ ctx context.Context,
+ client client.Client,
+ s3InstanceResource *s3v1alpha1.S3Instance,
+) (corev1.Secret, error) {
+ logger := log.FromContext(ctx)
+
+ s3InstanceCaCertSecret := &corev1.Secret{}
+
+ if s3InstanceResource.Spec.CaCertSecretRef == "" {
+ logger.Info(fmt.Sprintf("No CaCertSecretRef for s3instance %s", s3InstanceResource.Name))
+ return *s3InstanceCaCertSecret, nil
+ }
+
+ err := client.Get(
+ ctx,
+ types.NamespacedName{
+ Namespace: s3InstanceResource.Namespace,
+ Name: s3InstanceResource.Spec.CaCertSecretRef,
+ },
+ s3InstanceCaCertSecret,
+ )
+ if err != nil {
+ if k8sapierrors.IsNotFound(err) {
+ logger.Info(fmt.Sprintf(
+ "No Secret %s for s3instance %s",
+ s3InstanceResource.Spec.CaCertSecretRef,
+ s3InstanceResource.Name,
+ ))
+ return *s3InstanceCaCertSecret, fmt.Errorf(
+ "secret %s not found in namespace %s",
+ s3InstanceResource.Spec.CaCertSecretRef,
+ s3InstanceResource.Namespace,
+ )
+ }
+ return *s3InstanceCaCertSecret, err
+ }
+ return *s3InstanceCaCertSecret, nil
+}
+
+func (s3InstanceHelper *S3InstanceHelper) GetS3InstanceRefInfo(ressourceS3InstanceRef string, ressourceNamespace string) S3InstanceInfo {
+ if strings.Contains(ressourceS3InstanceRef, "/") {
+ result := strings.Split(ressourceS3InstanceRef, "/")
+ return S3InstanceInfo{name: result[1], namespace: result[0]}
+ }
+ return S3InstanceInfo{name: ressourceS3InstanceRef, namespace: ressourceNamespace}
+}
+
+func (s3InstanceHelper *S3InstanceHelper) IsAllowedNamespaces(namespace string, s3Instance *s3v1alpha1.S3Instance) bool {
+ if len(s3Instance.Spec.AllowedNamespaces) > 0 {
+ for _, allowedNamespace := range s3Instance.Spec.AllowedNamespaces {
+ if strings.HasPrefix(allowedNamespace, "*") &&
+ strings.HasSuffix(allowedNamespace, "*") && strings.Contains(
+ namespace,
+ strings.TrimSuffix(strings.TrimPrefix(allowedNamespace, "*"), "*"),
+ ) {
+ return true
+ } else if strings.HasPrefix(allowedNamespace, "*") && strings.HasSuffix(namespace, strings.TrimPrefix(allowedNamespace, "*")) {
+ return true
+ } else if
strings.HasSuffix(allowedNamespace, "*") && strings.HasPrefix(namespace, strings.TrimSuffix(allowedNamespace, "*")) { + return true + } else if namespace == allowedNamespace { + return true + } + } + return false + } else { + return namespace == s3Instance.Namespace + } +} + +type S3InstanceInfo struct { + name string + namespace string +} + +func (s3InstanceInfo S3InstanceInfo) String() string { + return fmt.Sprintf("%s/%s", s3InstanceInfo.namespace, s3InstanceInfo.name) +} + +func (s3InstanceInfo S3InstanceInfo) Equal(s3InstanceInfoName string, s3InstanceInfoNamespace string) bool { + return s3InstanceInfo.name == s3InstanceInfoName && s3InstanceInfo.namespace == s3InstanceInfoNamespace +} + +type S3InstanceHelper struct { +} + +func NewS3InstanceHelper() *S3InstanceHelper { + return &S3InstanceHelper{} +} diff --git a/controllers/s3/factory/minioS3Client.go b/internal/s3/client/impl/minioS3Client.go similarity index 56% rename from controllers/s3/factory/minioS3Client.go rename to internal/s3/client/impl/minioS3Client.go index fb307d7..c340fbd 100644 --- a/controllers/s3/factory/minioS3Client.go +++ b/internal/s3/client/impl/minioS3Client.go @@ -1,107 +1,177 @@ -package factory +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3clientimpl import ( "bytes" "context" "crypto/tls" "crypto/x509" - "encoding/base64" + "fmt" "net/http" - "os" + neturl "net/url" "strings" + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" + ctrl "sigs.k8s.io/controller-runtime" ) type MinioS3Client struct { - s3Config S3Config + s3Config s3client.S3Config client minio.Client adminClient madmin.AdminClient } -func newMinioS3Client(S3Config *S3Config) *MinioS3Client { +func NewMinioS3Client(S3Config *s3client.S3Config) (*MinioS3Client, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("creating minio clients (regular and admin)") + minioClient, err := generateMinioClient( + S3Config.S3Url, + S3Config.AccessKey, + S3Config.SecretKey, + S3Config.Region, + S3Config.CaCertificatesBase64, + ) + if err != nil { + s3Logger.Error(err, "an error occurred while creating a new minio client") + return nil, err + } + adminClient, err := generateAdminMinioClient( + S3Config.S3Url, + S3Config.AccessKey, + S3Config.SecretKey, + S3Config.CaCertificatesBase64, + ) + if err != nil { + s3Logger.Error(err, "an error occurred while creating a new minio admin client") + return nil, err + } + return &MinioS3Client{*S3Config, *minioClient, *adminClient}, nil +} + +func generateMinioClient( + url string, + accessKey string, + secretKey string, + region string, + caCertificates []string, +) (*minio.Client, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") + hostname, isSSL, err := extractHostAndScheme(url) + if err != nil { + s3Logger.Error(err, "an error occurred while creating a new minio client") + return nil, err + } minioOptions := 
&minio.Options{ - Creds: credentials.NewStaticV4(S3Config.AccessKey, S3Config.SecretKey, ""), - Region: S3Config.Region, - Secure: S3Config.UseSsl, - } - - // Preparing the tlsConfig to support custom CA if configured - // See also : - // - https://pkg.go.dev/github.com/minio/minio-go/v7@v7.0.52#Options - // - https://pkg.go.dev/net/http#RoundTripper - // - https://youngkin.github.io/post/gohttpsclientserver/#create-the-client - // - https://forfuncsake.github.io/post/2017/08/trust-extra-ca-cert-in-go-app/ - // Appending content directly, from a base64-encoded, PEM format CA certificate - // Variant : if S3Config.CaBundlePath was a string[] - // for _, caCertificateFilePath := range S3Config.S3Config.CaBundlePaths { - // caCert, err := os.ReadFile(caCertificateFilePath) - // if err != nil { - // log.Fatalf("Error opening CA cert file %s, Error: %s", caCertificateFilePath, err) - // } - // rootCAs.AppendCertsFromPEM([]byte(caCert)) - // } - addTransportOptions(S3Config, minioOptions) - - minioClient, err := minio.New(S3Config.S3UrlEndpoint, minioOptions) + Creds: credentials.NewStaticV4(accessKey, secretKey, ""), + Region: region, + Secure: isSSL, + } + + if len(caCertificates) > 0 { + addTlsClientConfigToMinioOptions(caCertificates, minioOptions) + } + + minioClient, err := minio.New(hostname, minioOptions) if err != nil { s3Logger.Error(err, "an error occurred while creating a new minio client") + return nil, err + } + return minioClient, nil +} + +func generateAdminMinioClient( + url string, + accessKey string, + secretKey string, + caCertificates []string, +) (*madmin.AdminClient, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") + hostname, isSSL, err := extractHostAndScheme(url) + if err != nil { + s3Logger.Error(err, "an error occurred while creating a new minio admin client") + return nil, err + } + + minioOptions := &madmin.Options{ + Creds: credentials.NewStaticV4(accessKey, secretKey, ""), + Secure: isSSL, + } + + if len(caCertificates) > 0 { + addTlsClientConfigToMinioAdminOptions(caCertificates, minioOptions) } - adminClient, err := madmin.New(S3Config.S3UrlEndpoint, S3Config.AccessKey, S3Config.SecretKey, S3Config.UseSsl) + minioAdminClient, err := madmin.NewWithOptions(hostname, minioOptions) if err != nil { s3Logger.Error(err, "an error occurred while creating a new minio admin client") + return nil, err } - // Getting the custom root CA (if any) from the "regular" client's Transport - adminClient.SetCustomTransport(minioOptions.Transport) - return &MinioS3Client{*S3Config, *minioClient, *adminClient} + return minioAdminClient, nil } -func addTransportOptions(S3Config *S3Config, minioOptions *minio.Options) { - if len(S3Config.CaCertificatesBase64) > 0 { +func extractHostAndScheme(url string) (string, bool, error) { + parsedURL, err := neturl.Parse(url) + if err != nil { + return "", false, fmt.Errorf("cannot detect if url use ssl or not") + } + return parsedURL.Hostname(), parsedURL.Scheme == "https", nil +} - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } +func addTlsClientConfigToMinioOptions(caCertificates []string, minioOptions *minio.Options) { + rootCAs, _ := x509.SystemCertPool() + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } - for _, caCertificateBase64 := range S3Config.CaCertificatesBase64 { - decodedCaCertificate, err := base64.StdEncoding.DecodeString(caCertificateBase64) - if err != nil { - s3Logger.Error(err, "an error occurred while parsing a base64-encoded CA certificate") - } + 
for _, caCertificate := range caCertificates { + rootCAs.AppendCertsFromPEM([]byte(caCertificate)) + } - rootCAs.AppendCertsFromPEM(decodedCaCertificate) - } + minioOptions.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: rootCAs, + }, + } +} - minioOptions.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: rootCAs, - }, - } - } else if len(S3Config.CaBundlePath) > 0 { +func addTlsClientConfigToMinioAdminOptions(caCertificates []string, minioOptions *madmin.Options) { + rootCAs, _ := x509.SystemCertPool() + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } + for _, caCertificate := range caCertificates { + // caCertificateAsByte := []byte(caCertificate) + // caCertificateEncoded := base64.StdEncoding.EncodeToString(caCertificateAsByte) + // rootCAs.AppendCertsFromPEM([]byte(caCertificateEncoded)) + rootCAs.AppendCertsFromPEM([]byte(caCertificate)) - caCert, err := os.ReadFile(S3Config.CaBundlePath) - if err != nil { - s3Logger.Error(err, "an error occurred while reading a CA certificates bundle file") - } - rootCAs.AppendCertsFromPEM([]byte(caCert)) + } - minioOptions.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: rootCAs, - }, - } + minioOptions.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: rootCAs, + }, } } @@ -109,33 +179,72 @@ func addTransportOptions(S3Config *S3Config, minioOptions *minio.Options) { // Bucket methods // // ////////////////// func (minioS3Client *MinioS3Client) BucketExists(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("checking bucket existence", "bucket", name) return minioS3Client.client.BucketExists(context.Background(), name) } func (minioS3Client *MinioS3Client) CreateBucket(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("creating bucket", "bucket", name) - return minioS3Client.client.MakeBucket(context.Background(), name, minio.MakeBucketOptions{Region: minioS3Client.s3Config.Region}) + return minioS3Client.client.MakeBucket( + context.Background(), + name, + minio.MakeBucketOptions{Region: minioS3Client.s3Config.Region}, + ) +} + +func (minioS3Client *MinioS3Client) ListBuckets() ([]string, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") + s3Logger.Info("listing bucket") + listBucketsInfo, err := minioS3Client.client.ListBuckets(context.Background()) + bucketsName := []string{} + if err != nil { + errAsResponse := minio.ToErrorResponse(err) + s3Logger.Error(err, "an error occurred while listing buckets", "code", errAsResponse.Code) + return bucketsName, err + } + for _, bucketInfo := range listBucketsInfo { + bucketsName = append(bucketsName, bucketInfo.Name) + } + return bucketsName, nil } // Will fail if bucket is not empty func (minioS3Client *MinioS3Client) DeleteBucket(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("deleting bucket", "bucket", name) return minioS3Client.client.RemoveBucket(context.Background(), name) } func (minioS3Client *MinioS3Client) CreatePath(bucketname string, path string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("creating a path on a bucket", "bucket", bucketname, "path", path) emptyReader := bytes.NewReader([]byte("")) - _, err := minioS3Client.client.PutObject(context.Background(), bucketname, 
"/"+path+"/"+".keep", emptyReader, 0, minio.PutObjectOptions{}) + _, err := minioS3Client.client.PutObject( + context.Background(), + bucketname, + "/"+path+"/"+".keep", + emptyReader, + 0, + minio.PutObjectOptions{}, + ) if err != nil { - s3Logger.Error(err, "an error occurred during path creation on bucket", "bucket", bucketname, "path", path) + s3Logger.Error( + err, + "an error occurred during path creation on bucket", + "bucket", + bucketname, + "path", + path, + ) return err } return nil } func (minioS3Client *MinioS3Client) PathExists(bucketname string, path string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("checking path existence on a bucket", "bucket", bucketname, "path", path) _, err := minioS3Client.client. StatObject(context.Background(), @@ -158,10 +267,23 @@ func (minioS3Client *MinioS3Client) PathExists(bucketname string, path string) ( } func (minioS3Client *MinioS3Client) DeletePath(bucketname string, path string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("deleting a path on a bucket", "bucket", bucketname, "path", path) - err := minioS3Client.client.RemoveObject(context.Background(), bucketname, "/"+path+"/.keep", minio.RemoveObjectOptions{}) + err := minioS3Client.client.RemoveObject( + context.Background(), + bucketname, + "/"+path+"/.keep", + minio.RemoveObjectOptions{}, + ) if err != nil { - s3Logger.Error(err, "an error occurred during path deletion on bucket", "bucket", bucketname, "path", path) + s3Logger.Error( + err, + "an error occurred during path deletion on bucket", + "bucket", + bucketname, + "path", + path, + ) return err } return nil @@ -171,6 +293,7 @@ func (minioS3Client *MinioS3Client) DeletePath(bucketname string, path string) e // Quota methods // // ///////////////// func (minioS3Client *MinioS3Client) GetQuota(name string) (int64, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("getting quota on bucket", "bucket", name) bucketQuota, err := minioS3Client.adminClient.GetBucketQuota(context.Background(), name) if err != nil { @@ -180,8 +303,13 @@ func (minioS3Client *MinioS3Client) GetQuota(name string) (int64, error) { } func (minioS3Client *MinioS3Client) SetQuota(name string, quota int64) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("setting quota on bucket", "bucket", name, "quotaToSet", quota) - minioS3Client.adminClient.SetBucketQuota(context.Background(), name, &madmin.BucketQuota{Quota: uint64(quota), Type: madmin.HardQuota}) + minioS3Client.adminClient.SetBucketQuota( + context.Background(), + name, + &madmin.BucketQuota{Quota: uint64(quota), Type: madmin.HardQuota}, + ) return nil } @@ -202,6 +330,7 @@ func (minioS3Client *MinioS3Client) SetQuota(name string, quota int64) error { // A consequence is that we do things a little differently compared to buckets - instead of just testing for // existence, we get the whole policy info, and the controller uses it down the line. 
func (minioS3Client *MinioS3Client) GetPolicyInfo(name string) (*madmin.PolicyInfo, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("retrieving policy info", "policy", name) policy, err := minioS3Client.adminClient.InfoCannedPolicyV2(context.Background(), name) @@ -225,11 +354,13 @@ func (minioS3Client *MinioS3Client) GetPolicyInfo(name string) (*madmin.PolicyIn // The AddCannedPolicy of the madmin client actually does both creation and update (so does the CLI, as both // are wired to the same endpoint on Minio API server). func (minioS3Client *MinioS3Client) CreateOrUpdatePolicy(name string, content string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("create or update policy", "policy", name) return minioS3Client.adminClient.AddCannedPolicy(context.Background(), name, []byte(content)) } func (minioS3Client *MinioS3Client) PolicyExist(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("checking policy existence", "policy", name) policies, err := minioS3Client.adminClient.ListPolicies(context.Background(), name) if err != nil { @@ -245,6 +376,7 @@ func (minioS3Client *MinioS3Client) PolicyExist(name string) (bool, error) { } func (minioS3Client *MinioS3Client) DeletePolicy(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("delete policy", "policy", name) return minioS3Client.adminClient.RemoveCannedPolicy(context.Background(), name) } @@ -254,6 +386,7 @@ func (minioS3Client *MinioS3Client) DeletePolicy(name string) error { //////////////////// func (minioS3Client *MinioS3Client) CreateUser(accessKey string, secretKey string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Creating user", "accessKey", accessKey) err := minioS3Client.adminClient.AddUser(context.Background(), accessKey, secretKey) if err != nil { @@ -263,7 +396,12 @@ func (minioS3Client *MinioS3Client) CreateUser(accessKey string, secretKey strin return nil } -func (minioS3Client *MinioS3Client) AddServiceAccountForUser(name string, accessKey string, secretKey string) error { +func (minioS3Client *MinioS3Client) AddServiceAccountForUser( + name string, + accessKey string, + secretKey string, +) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Adding service account for user", "user", name, "accessKey", accessKey) opts := madmin.AddServiceAccountReq{ @@ -285,6 +423,7 @@ func (minioS3Client *MinioS3Client) AddServiceAccountForUser(name string, access } func (minioS3Client *MinioS3Client) UserExist(accessKey string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("checking user existence", "accessKey", accessKey) _, _err := minioS3Client.adminClient.GetUserInfo(context.Background(), accessKey) if _err != nil { @@ -299,6 +438,7 @@ func (minioS3Client *MinioS3Client) UserExist(accessKey string) (bool, error) { } func (minioS3Client *MinioS3Client) DeleteUser(accessKey string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("delete user with accessKey", "accessKey", accessKey) err := minioS3Client.adminClient.RemoveUser(context.Background(), accessKey) if err != nil { @@ -313,6 +453,7 @@ func (minioS3Client *MinioS3Client) DeleteUser(accessKey string) error { } func (minioS3Client *MinioS3Client) GetUserPolicies(accessKey string) ([]string, error) { + s3Logger := 
ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Get user policies", "accessKey", accessKey) userInfo, err := minioS3Client.adminClient.GetUserInfo(context.Background(), accessKey) if err != nil { @@ -323,24 +464,36 @@ func (minioS3Client *MinioS3Client) GetUserPolicies(accessKey string) ([]string, return strings.Split(userInfo.PolicyName, ","), nil } -func (minioS3Client *MinioS3Client) CheckUserCredentialsValid(name string, accessKey string, secretKey string) (bool, error) { +func (minioS3Client *MinioS3Client) CheckUserCredentialsValid( + name string, + accessKey string, + secretKey string, +) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Check credentials for user", "user", name, "accessKey", accessKey) - minioTestClientOptions := &minio.Options{ - Creds: credentials.NewStaticV4(accessKey, secretKey, ""), - Region: minioS3Client.s3Config.Region, - Secure: minioS3Client.s3Config.UseSsl, - } - addTransportOptions(&minioS3Client.s3Config, minioTestClientOptions) - minioTestClient, err := minio.New(minioS3Client.s3Config.S3UrlEndpoint, minioTestClientOptions) + + minioTestClient, err := generateMinioClient( + minioS3Client.s3Config.S3Url, + accessKey, + secretKey, + minioS3Client.s3Config.Region, + minioS3Client.s3Config.CaCertificatesBase64, + ) if err != nil { s3Logger.Error(err, "An error occurred while creating a new Minio test client") + return false, err } - _, err = minioTestClient.ListBuckets(context.Background()) if err != nil { errAsResponse := minio.ToErrorResponse(err) if errAsResponse.Code == "SignatureDoesNotMatch" { - s3Logger.Info("the user credentials appear to be invalid", "accessKey", accessKey, "s3BackendError", errAsResponse) + s3Logger.Info( + "the user credentials appear to be invalid", + "accessKey", + accessKey, + "s3BackendError", + errAsResponse, + ) return false, nil } else if errAsResponse.Code == "InvalidAccessKeyId" { s3Logger.Info("this accessKey does not exist on the s3 backend", "accessKey", accessKey, "s3BackendError", errAsResponse) @@ -353,7 +506,11 @@ func (minioS3Client *MinioS3Client) CheckUserCredentialsValid(name string, acces return true, nil } -func (minioS3Client *MinioS3Client) RemovePoliciesFromUser(accessKey string, policies []string) error { +func (minioS3Client *MinioS3Client) RemovePoliciesFromUser( + accessKey string, + policies []string, +) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Removing policies from user", "user", accessKey, "policies", policies) opts := madmin.PolicyAssociationReq{ @@ -369,7 +526,12 @@ func (minioS3Client *MinioS3Client) RemovePoliciesFromUser(accessKey string, pol s3Logger.Info("The policy change has no net effect") return nil } - s3Logger.Error(err, "an error occurred when attaching a policy to the user", "code", errAsResp.Code) + s3Logger.Error( + err, + "an error occurred when attaching a policy to the user", + "code", + errAsResp.Code, + ) return err } @@ -377,6 +539,7 @@ func (minioS3Client *MinioS3Client) RemovePoliciesFromUser(accessKey string, pol } func (minioS3Client *MinioS3Client) AddPoliciesToUser(accessKey string, policies []string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Adding policies to user", "user", accessKey, "policies", policies) opts := madmin.PolicyAssociationReq{ User: accessKey, @@ -389,8 +552,17 @@ func (minioS3Client *MinioS3Client) AddPoliciesToUser(accessKey string, policies s3Logger.Info("The policy change has no net effect") 
return nil } - s3Logger.Error(err, "an error occurred when attaching a policy to the user", "code", errAsResp.Code) + s3Logger.Error( + err, + "an error occurred when attaching a policy to the user", + "code", + errAsResp.Code, + ) return err } return nil } + +func (minioS3Client *MinioS3Client) GetConfig() *s3client.S3Config { + return &minioS3Client.s3Config +} diff --git a/controllers/s3/factory/mockedS3Client.go b/internal/s3/client/impl/mockedS3Client.go similarity index 59% rename from controllers/s3/factory/mockedS3Client.go rename to internal/s3/client/impl/mockedS3Client.go index 948a5c4..ea79403 100644 --- a/controllers/s3/factory/mockedS3Client.go +++ b/internal/s3/client/impl/mockedS3Client.go @@ -1,111 +1,159 @@ -package factory +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3clientimpl import ( + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" "github.com/minio/madmin-go/v3" + ctrl "sigs.k8s.io/controller-runtime" ) -type MockedS3Client struct{} +type MockedS3Client struct { + s3Config s3client.S3Config +} func (mockedS3Provider *MockedS3Client) BucketExists(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking bucket existence", "bucket", name) return false, nil } func (mockedS3Provider *MockedS3Client) CreateBucket(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking a bucket", "bucket", name) return nil } func (mockedS3Provider *MockedS3Client) DeleteBucket(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("deleting a bucket", "bucket", name) return nil } func (mockedS3Provider *MockedS3Client) CreatePath(bucketname string, path string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("creating a path on a bucket", "bucket", bucketname, "path", path) return nil } func (mockedS3Provider *MockedS3Client) PathExists(bucketname string, path string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking path existence on a bucket", "bucket", bucketname, "path", path) return true, nil } func (mockedS3Provider *MockedS3Client) DeletePath(bucketname string, path string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("deleting a path on a bucket", "bucket", bucketname, "path", path) return nil } func (mockedS3Provider *MockedS3Client) GetQuota(name string) (int64, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("getting quota on bucket", "bucket", name) return 1, nil } func (mockedS3Provider *MockedS3Client) SetQuota(name string, quota int64) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("setting quota on bucket", "bucket", name, "quotaToSet", quota) return nil } func (mockedS3Provider *MockedS3Client) GetPolicyInfo(name string) (*madmin.PolicyInfo, error) { + s3Logger := 
ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("retrieving policy info", "policy", name) return nil, nil } func (mockedS3Provider *MockedS3Client) CreateOrUpdatePolicy(name string, content string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("create or update policy", "policy", name, "policyContent", content) return nil } func (mockedS3Provider *MockedS3Client) CreateUser(name string, password string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("create or update user", "user", name) return nil } func (mockedS3Provider *MockedS3Client) UserExist(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking user existence", "user", name) return true, nil } func (mockedS3Provider *MockedS3Client) AddServiceAccountForUser(name string, accessKey string, secretKey string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("Adding service account for user", "user", name) return nil } func (mockedS3Provider *MockedS3Client) PolicyExist(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking policy existence", "policy", name) return true, nil } func (mockedS3Provider *MockedS3Client) AddPoliciesToUser(username string, policies []string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("Adding policies to user", "user", username, "policies", policies) return nil } func (mockedS3Provider *MockedS3Client) DeletePolicy(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("delete policy", "policy", name) return nil } func (mockedS3Provider *MockedS3Client) DeleteUser(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("delete user", "user", name) return nil } func (mockedS3Provider *MockedS3Client) CheckUserCredentialsValid(name string, accessKey string, secretKey string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking credential for user", "user", name) return true, nil } func (mockedS3Provider *MockedS3Client) GetUserPolicies(name string) ([]string, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("Getting user policies for user", "user", name) return []string{}, nil } func (mockedS3Provider *MockedS3Client) RemovePoliciesFromUser(username string, policies []string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("Removing policies from user", "user", username) return nil } -func newMockedS3Client() *MockedS3Client { - return &MockedS3Client{} +func (mockedS3Provider *MockedS3Client) ListBuckets() ([]string, error) { + return []string{}, nil +} + +func (mockedS3Provider *MockedS3Client) GetConfig() *s3client.S3Config { + return &mockedS3Provider.s3Config +} + +func NewMockedS3Client() *MockedS3Client { + return &MockedS3Client{s3Config: s3client.S3Config{}} } diff --git a/controllers/s3/factory/interface.go b/internal/s3/client/s3client.go similarity index 56% rename from controllers/s3/factory/interface.go rename to internal/s3/client/s3client.go index e8bf10f..cc48515 100644 --- a/controllers/s3/factory/interface.go +++ b/internal/s3/client/s3client.go @@ -1,17 +1,39 @@ -package factory +/* +Copyright 2023. 
-import ( - "fmt" +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - "github.com/minio/madmin-go/v3" + http://www.apache.org/licenses/LICENSE-2.0 - ctrl "sigs.k8s.io/controller-runtime" -) +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3client -var ( - s3Logger = ctrl.Log.WithValues("logger", "s3client") +import ( + "github.com/minio/madmin-go/v3" ) +type S3Config struct { + S3Provider string + S3Url string + Region string + AccessKey string + SecretKey string + CaCertificatesBase64 []string + AllowedNamespaces []string + BucketDeletionEnabled bool + S3UserDeletionEnabled bool + PathDeletionEnabled bool + PolicyDeletionEnabled bool +} + type S3Client interface { BucketExists(name string) (bool, error) CreateBucket(name string) error @@ -35,25 +57,6 @@ type S3Client interface { GetUserPolicies(name string) ([]string, error) AddPoliciesToUser(accessKey string, policies []string) error RemovePoliciesFromUser(accessKey string, policies []string) error -} - -type S3Config struct { - S3Provider string - S3UrlEndpoint string - Region string - AccessKey string - SecretKey string - UseSsl bool - CaCertificatesBase64 []string - CaBundlePath string -} - -func GetS3Client(s3Provider string, S3Config *S3Config) (S3Client, error) { - if s3Provider == "mockedS3Provider" { - return newMockedS3Client(), nil - } - if s3Provider == "minio" { - return newMinioS3Client(S3Config), nil - } - return nil, fmt.Errorf("s3 provider " + s3Provider + "not supported") + GetConfig() *S3Config + ListBuckets() ([]string, error) } diff --git a/internal/s3/factory/impl/s3factoryImpl.go b/internal/s3/factory/impl/s3factoryImpl.go new file mode 100644 index 0000000..8cca6fe --- /dev/null +++ b/internal/s3/factory/impl/s3factoryImpl.go @@ -0,0 +1,41 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3factory + +import ( + "fmt" + + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" + s3clientImpl "github.com/InseeFrLab/s3-operator/internal/s3/client/impl" +) + +type S3Factory struct { +} + +func NewS3Factory() *S3Factory { + return &S3Factory{} +} + +func (mockedS3Provider *S3Factory) GenerateS3Client(s3Provider string, s3Config *s3client.S3Config) (s3client.S3Client, error) { + if s3Provider == "mockedS3Provider" { + return s3clientImpl.NewMockedS3Client(), nil + } + if s3Provider == "minio" { + return s3clientImpl.NewMinioS3Client(s3Config) + } + return nil, fmt.Errorf("s3 provider %s not supported", s3Provider) +} diff --git a/internal/s3/factory/s3factory.go b/internal/s3/factory/s3factory.go new file mode 100644 index 0000000..2042b4f --- /dev/null +++ b/internal/s3/factory/s3factory.go @@ -0,0 +1,25 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3factory + +import ( + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" +) + +type S3Factory interface { + GenerateS3Client(s3Provider string, s3Config *s3client.S3Config) (s3client.S3Client, error) +} diff --git a/test/mocks/S3FactoryMock.go b/test/mocks/S3FactoryMock.go new file mode 100644 index 0000000..60adc2a --- /dev/null +++ b/test/mocks/S3FactoryMock.go @@ -0,0 +1,37 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mocks + +import ( + "github.com/stretchr/testify/mock" + + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" +) + +// Mocked Factory +type MockedS3ClientFactory struct { + mock.Mock +} + +func NewMockedS3ClientFactory() *MockedS3ClientFactory { + return &MockedS3ClientFactory{} +} + +func (m *MockedS3ClientFactory) GenerateS3Client(s3Provider string, s3Config *s3client.S3Config) (s3client.S3Client, error) { + args := m.Called(s3Provider, s3Config) + return args.Get(0).(s3client.S3Client), args.Error(1) +} diff --git a/test/mocks/mockedS3Client.go b/test/mocks/mockedS3Client.go new file mode 100644 index 0000000..8418004 --- /dev/null +++ b/test/mocks/mockedS3Client.go @@ -0,0 +1,202 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
diff --git a/test/mocks/mockedS3Client.go b/test/mocks/mockedS3Client.go
new file mode 100644
index 0000000..8418004
--- /dev/null
+++ b/test/mocks/mockedS3Client.go
@@ -0,0 +1,202 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mocks
+
+import (
+	s3client "github.com/InseeFrLab/s3-operator/internal/s3/client"
+	"github.com/minio/madmin-go/v3"
+	"github.com/stretchr/testify/mock"
+	ctrl "sigs.k8s.io/controller-runtime"
+)
+
+type MockedS3Client struct {
+	s3Config s3client.S3Config
+	mock.Mock
+}
+
+func (mockedS3Provider *MockedS3Client) BucketExists(name string) (bool, error) {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("checking bucket existence", "bucket", name)
+	args := mockedS3Provider.Called(name)
+	return args.Bool(0), args.Error(1)
+}
+
+func (mockedS3Provider *MockedS3Client) CreateBucket(name string) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("creating a bucket", "bucket", name)
+	args := mockedS3Provider.Called(name)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) DeleteBucket(name string) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("deleting a bucket", "bucket", name)
+	args := mockedS3Provider.Called(name)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) CreatePath(bucketname string, path string) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("creating a path on a bucket", "bucket", bucketname, "path", path)
+	args := mockedS3Provider.Called(bucketname, path)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) PathExists(bucketname string, path string) (bool, error) {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("checking path existence on a bucket", "bucket", bucketname, "path", path)
+	args := mockedS3Provider.Called(bucketname, path)
+	return args.Bool(0), args.Error(1)
+}
+
+func (mockedS3Provider *MockedS3Client) DeletePath(bucketname string, path string) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("deleting a path on a bucket", "bucket", bucketname, "path", path)
+	args := mockedS3Provider.Called(bucketname, path)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) GetQuota(name string) (int64, error) {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("getting quota on bucket", "bucket", name)
+	args := mockedS3Provider.Called(name)
+	return int64(args.Int(0)), args.Error(1)
+}
+
+func (mockedS3Provider *MockedS3Client) SetQuota(name string, quota int64) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("setting quota on bucket", "bucket", name, "quotaToSet", quota)
+	args := mockedS3Provider.Called(name, quota)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) GetPolicyInfo(name string) (*madmin.PolicyInfo, error) {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("retrieving policy info", "policy", name)
+	args := mockedS3Provider.Called(name)
+	if args.Get(0) == nil {
+		return nil, args.Error(1)
+	}
+	return args.Get(0).(*madmin.PolicyInfo), args.Error(1)
+
+}
+
+func (mockedS3Provider *MockedS3Client) CreateOrUpdatePolicy(name string, content string) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("create or update policy", "policy", name, "policyContent", content)
+	args := mockedS3Provider.Called(name, content)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) CreateUser(name string, password string) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("create or update user", "user", name)
+	args := mockedS3Provider.Called(name, password)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) UserExist(name string) (bool, error) {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("checking user existence", "user", name)
+	args := mockedS3Provider.Called(name)
+	return args.Bool(0), args.Error(1)
+}
+
+func (mockedS3Provider *MockedS3Client) AddServiceAccountForUser(
+	name string,
+	accessKey string,
+	secretKey string,
+) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("Adding service account for user", "user", name)
+	args := mockedS3Provider.Called(name)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) PolicyExist(name string) (bool, error) {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("checking policy existence", "policy", name)
+	args := mockedS3Provider.Called(name)
+	return args.Bool(0), args.Error(1)
+}
+
+func (mockedS3Provider *MockedS3Client) AddPoliciesToUser(
+	username string,
+	policies []string,
+) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("Adding policies to user", "user", username, "policies", policies)
+	args := mockedS3Provider.Called(username, policies)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) DeletePolicy(name string) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("delete policy", "policy", name)
+	args := mockedS3Provider.Called(name)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) DeleteUser(name string) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("delete user", "user", name)
+	args := mockedS3Provider.Called(name)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) CheckUserCredentialsValid(
+	name string,
+	accessKey string,
+	secretKey string,
+) (bool, error) {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("checking credential for user", "user", name)
+	args := mockedS3Provider.Called(name, accessKey, secretKey)
+	return args.Bool(0), args.Error(1)
+}
+
+func (mockedS3Provider *MockedS3Client) GetUserPolicies(name string) ([]string, error) {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("Getting user policies for user", "user", name)
+	args := mockedS3Provider.Called(name)
+	return args.Get(0).([]string), args.Error(1)
+}
+
+func (mockedS3Provider *MockedS3Client) RemovePoliciesFromUser(
+	username string,
+	policies []string,
+) error {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("Removing policies from user", "user", username)
+	args := mockedS3Provider.Called(username, policies)
+	return args.Error(0)
+}
+
+func (mockedS3Provider *MockedS3Client) ListBuckets() ([]string, error) {
+	s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client")
+	s3Logger.Info("Listing bucket")
+	args := mockedS3Provider.Called()
+	return args.Get(0).([]string), args.Error(1)
+}
+
+func (mockedS3Provider *MockedS3Client) GetConfig() *s3client.S3Config {
+	return &mockedS3Provider.s3Config
+}
+
+func NewMockedS3Client() *MockedS3Client {
+	return &MockedS3Client{s3Config: s3client.S3Config{}}
+}
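A hypothetical test (not part of the patch; the test name and "demo-bucket" are illustrative) showing how these two mocks are meant to be combined: expectations are registered with testify's On/Return, and the mocked factory hands the mocked client back to the code under test.

package mocks_test

import (
	"testing"

	s3client "github.com/InseeFrLab/s3-operator/internal/s3/client"
	"github.com/InseeFrLab/s3-operator/test/mocks"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
)

func TestMockedFactoryReturnsMockedClient(t *testing.T) {
	// The mocked client answers only the calls it was prepared for.
	mockedClient := mocks.NewMockedS3Client()
	mockedClient.On("BucketExists", "demo-bucket").Return(true, nil)

	// The mocked factory returns that client regardless of the config passed in.
	factory := mocks.NewMockedS3ClientFactory()
	factory.On("GenerateS3Client", "minio", mock.Anything).Return(mockedClient, nil)

	generated, err := factory.GenerateS3Client("minio", &s3client.S3Config{})
	assert.NoError(t, err)

	exists, err := generated.BucketExists("demo-bucket")
	assert.NoError(t, err)
	assert.True(t, exists)
}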
diff --git a/test/utils/testUtils.go b/test/utils/testUtils.go
new file mode 100644
index 0000000..b899496
--- /dev/null
+++ b/test/utils/testUtils.go
@@ -0,0 +1,188 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package testUtils
+
+import (
+	"fmt"
+
+	s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1"
+	s3client "github.com/InseeFrLab/s3-operator/internal/s3/client"
+	s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory"
+	"github.com/InseeFrLab/s3-operator/test/mocks"
+	"github.com/minio/madmin-go/v3"
+	"github.com/stretchr/testify/mock"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/client/fake"
+)
+
+type TestUtils struct {
+	S3Factory s3factory.S3Factory
+	Client    client.Client
+}
+
+func NewTestUtils() *TestUtils {
+	return &TestUtils{}
+}
+
+func (t *TestUtils) SetupMockedS3FactoryAndClient() {
+	mockedS3Client := mocks.NewMockedS3Client()
+	mockedS3Client.On("BucketExists", "test-bucket").Return(false, nil)
+	mockedS3Client.On("BucketExists", "existing-bucket").Return(true, nil)
+	mockedS3Client.On("CreateBucket", "test-bucket").Return(nil)
+	mockedS3Client.On("SetQuota", "test-bucket", int64(10)).Return(nil)
+	mockedS3Client.On("ListBuckets").Return([]string{}, nil)
+	mockedS3Client.On("GetPolicyInfo", "example-policy").Return(nil, nil)
+	existingPolicy := []byte(`{
+"Version": "2012-10-17",
+"Statement": [
+{
+	"Effect": "Allow",
+	"Principal": {
+		"AWS": "*"
+	},
+	"Action": [
+		"s3:GetObject",
+		"s3:PutObject",
+		"s3:DeleteObject"
+	],
+	"Resource": "arn:aws:s3:::my-bucket/*"
+}
+]
+}`)
+	mockedS3Client.On("GetPolicyInfo", "existing-policy").Return(&madmin.PolicyInfo{PolicyName: "existing-policy", Policy: existingPolicy}, nil)
+	mockedS3Client.On("CreateOrUpdatePolicy", "existing-policy", mock.AnythingOfType("string")).Return(nil)
+	mockedS3Client.On("CreateOrUpdatePolicy", "example-policy", "").Return(nil)
+	mockedS3Client.On("PathExists", "existing-bucket", "mypath").Return(false, nil)
+	mockedS3Client.On("CreatePath", "existing-bucket", "mypath").Return(nil)
+	mockedS3Client.On("UserExist", "example-user").Return(false, nil)
+	mockedS3Client.On("CreateUser", "example-user", mock.AnythingOfType("string")).Return(nil)
+	mockedS3Client.On("AddPoliciesToUser", "example-user", mock.AnythingOfType("[]string")).Return(nil)
+
+	mockedS3Client.On("UserExist", "existing-valid-user").Return(true, nil)
+	mockedS3Client.On("CreateUser", "existing-valid-user", mock.AnythingOfType("string")).Return(nil)
+	mockedS3Client.On("AddPoliciesToUser", "existing-valid-user", mock.AnythingOfType("[]string")).Return(nil)
+
+	mockedS3Client.On("CheckUserCredentialsValid", "existing-valid-user", "existing-valid-user", "invalidSecret").Return(false, nil)
+	mockedS3Client.On("CheckUserCredentialsValid", "existing-valid-user", "existing-valid-user", "invalidSecret").Return(false, nil)
+	mockedS3Client.On("CheckUserCredentialsValid", "existing-valid-user", "existing-valid-user", "validSecret").Return(true, nil)
+	mockedS3Client.On("CheckUserCredentialsValid", "existing-valid-user", "existing-valid-user", mock.AnythingOfType("string")).Return(true, nil)
+	mockedS3Client.On("GetQuota", "existing-bucket").Return(10, nil)
+	mockedS3Client.On("GetQuota", "existing-invalid-bucket").Return(10, nil)
+	mockedS3Client.On("SetQuota", "existing-invalid-bucket", int64(100)).Return(nil)
+	mockedS3Client.On("GetUserPolicies", "existing-valid-user").Return([]string{"admin"}, nil)
+	mockedS3Client.On("PathExists", "existing-bucket", "example").Return(true, nil)
+	mockedS3Client.On("PathExists", "existing-invalid-bucket", "example").Return(true, nil)
+	mockedS3Client.On("PathExists", "existing-invalid-bucket", "non-existing").Return(false, nil)
+	mockedS3Client.On("BucketExists", "existing-invalid-bucket").Return(true, nil)
+	mockedS3Client.On("BucketExists", "non-existing-bucket").Return(false, nil)
+
+	mockedS3Client.On("CreatePath", "existing-invalid-bucket", "non-existing").Return(nil)
+
+	mockedS3Client.On("DeleteUser", "existing-valid-user").Return(nil)
+
+	mockedInvalidS3Client := mocks.NewMockedS3Client()
+	mockedInvalidS3Client.On("BucketExists", "test-bucket").Return(false, nil)
+	mockedInvalidS3Client.On("CreateBucket", "test-bucket").Return(nil)
+	mockedInvalidS3Client.On("SetQuota", "test-bucket", int64(10)).Return(nil)
+
+	mockedInvalidS3Client.On("ListBuckets").Return([]string{}, fmt.Errorf("random error"))
+
+	mockedS3factory := mocks.NewMockedS3ClientFactory()
+	mockedS3factory.On("GenerateS3Client", "minio", mock.MatchedBy(func(cfg *s3client.S3Config) bool {
+		return cfg.S3Url == "https://minio.example.com"
+	})).Return(mockedS3Client, nil)
+	mockedS3factory.On("GenerateS3Client", "minio", mock.MatchedBy(func(cfg *s3client.S3Config) bool {
+		return cfg.S3Url == "https://minio.invalid.example.com"
+	})).Return(mockedInvalidS3Client, nil)
+
+	t.S3Factory = mockedS3factory
+}
+
+func (t *TestUtils) SetupDefaultS3instance() *s3v1alpha1.S3Instance {
+	s3Instance := &s3v1alpha1.S3Instance{
+		Spec: s3v1alpha1.S3InstanceSpec{
+			AllowedNamespaces:     []string{"default", "test-*", "*-namespace", "*allowed*"},
+			Url:                   "https://minio.example.com",
+			S3Provider:            "minio",
+			Region:                "us-east-1",
+			BucketDeletionEnabled: true,
+			S3UserDeletionEnabled: true,
+			PathDeletionEnabled:   true,
+			PolicyDeletionEnabled: true,
+			SecretRef:             "minio-credentials",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "default",
+			Namespace: "s3-operator",
+		},
+		Status: s3v1alpha1.S3InstanceStatus{Conditions: []metav1.Condition{{Reason: s3v1alpha1.Reconciled}}},
+	}
+
+	return s3Instance
+}
+
+func (t *TestUtils) GenerateBasicS3InstanceAndSecret() (*s3v1alpha1.S3Instance, *corev1.Secret) {
+	s3instanceResource := &s3v1alpha1.S3Instance{
+		Spec: s3v1alpha1.S3InstanceSpec{
+			AllowedNamespaces:     []string{"default", "test-*", "*-namespace", "*allowed*"},
+			Url:                   "https://minio.example.com",
+			S3Provider:            "minio",
+			Region:                "us-east-1",
+			BucketDeletionEnabled: true,
+			S3UserDeletionEnabled: true,
+			PathDeletionEnabled:   true,
+			PolicyDeletionEnabled: true,
+			SecretRef:             "minio-credentials",
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "default",
+			Namespace: "s3-operator",
+		},
+		Status: s3v1alpha1.S3InstanceStatus{Conditions: []metav1.Condition{{Reason: s3v1alpha1.Reconciled}}},
+	}
+
+	secretResource := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "minio-credentials",
+			Namespace: "s3-operator",
+		},
+		StringData: map[string]string{
+			"accessKey": "access_key_value",
+			"secretKey": "secret_key_value",
+		},
+	}
+
+	return s3instanceResource, secretResource
+}
+
+func (t *TestUtils) SetupClient(objects []client.Object) {
+	// Register the custom resources with the scheme
+	s := scheme.Scheme
+	s3v1alpha1.AddToScheme(s)
+	corev1.AddToScheme(s)
+
+	fakeClient := fake.NewClientBuilder().
+		WithScheme(s).
+		WithObjects(objects...).
+		WithStatusSubresource(objects...).
+		Build()
+
+	t.Client = fakeClient
+}
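A hypothetical sketch of how these helpers might be wired together in a reconciler test inside this module; the package name and the final nil checks are illustrative, and the hand-off of t.Client and t.S3Factory to an actual reconciler (internal/controller/*) is deliberately left out:

package example_test

import (
	"testing"

	testUtils "github.com/InseeFrLab/s3-operator/test/utils"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func TestExampleSetup(t *testing.T) {
	// Build the mocked S3 layer and a fake Kubernetes client pre-loaded with
	// an S3Instance and its credentials Secret.
	utils := testUtils.NewTestUtils()
	utils.SetupMockedS3FactoryAndClient()

	s3Instance, secret := utils.GenerateBasicS3InstanceAndSecret()
	utils.SetupClient([]client.Object{s3Instance, secret})

	// utils.Client and utils.S3Factory would now be passed to the reconciler
	// under test; that wiring is omitted here.
	if utils.Client == nil || utils.S3Factory == nil {
		t.Fatal("test utilities were not initialized")
	}
}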