diff --git a/.dockerignore b/.dockerignore index 0f04682..cbdb0ca 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,4 +1,4 @@ -# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file -# Ignore build and test binaries. -bin/ -testbin/ +# More info: https://docs.docker.com/engine/reference/builder/#dockerignore-file +# Ignore build and test binaries. +bin/ +testbin/ diff --git a/.github/workflows/ci-chart.yaml b/.github/workflows/ci-chart.yaml new file mode 100644 index 0000000..2e8caec --- /dev/null +++ b/.github/workflows/ci-chart.yaml @@ -0,0 +1,60 @@ +name: Helm CI + +on: + push: + branches: + - "**" + paths-ignore: + - 'docs/**' + tags: + - "*" + pull_request: + paths-ignore: + - 'docs/**' + +jobs: + + helm-lint: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Helm on Runner + uses: azure/setup-helm@v4.2.0 + + - name: Lint Chart + run: helm lint . + + helm-unitest: + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Helm on Runner + uses: azure/setup-helm@v4.2.0 + + - name: Lint Chart + run: echo "TODO" + + release_helm: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Install Helm on Runner + uses: azure/setup-helm@v4.2.0 + + - name: Configure Git + run: | + git config user.name "$GITHUB_ACTOR" + git config user.email "$GITHUB_ACTOR@users.noreply.github.com" + + - name: Run chart-releaser + uses: helm/chart-releaser-action@v1.6.0 + with: + charts_dir: deploy/charts + skip_existing: true + env: + CR_TOKEN: "${{ secrets.CR_TOKEN }}" diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci-docker.yaml similarity index 61% rename from .github/workflows/ci.yaml rename to .github/workflows/ci-docker.yaml index fc9cb7e..c548e77 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci-docker.yaml @@ -1,38 +1,69 @@ -on: [push] -name: build -jobs: - build: +name: Docker CI + +on: + push: + branches: + - "**" + paths-ignore: + - 'docs/**' + tags: + - "*" + pull_request: + paths-ignore: + - 'docs/**' + +jobs: + + lint-docker: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - uses: hadolint/hadolint-action@v3.1.0 + with: + dockerfile: Dockerfile + + build-docker: outputs: version: ${{ steps.get_version.outputs.VERSION }} runs-on: ubuntu-latest steps: - name: Checkout code uses: actions/checkout@v4 + - name: Docker meta id: docker_meta - uses: crazy-max/ghaction-docker-meta@v5.1.0 + uses: docker/metadata-action@v5 with: images: inseefrlab/s3-operator # list of Docker images to use as base name for tags + tags: | + type=ref,event=branch + type=ref,event=pr + type=semver,pattern={{version}} + type=semver,pattern={{major}}.{{minor}} + - name: Set up QEMU uses: docker/setup-qemu-action@v3 + - name: Set up Docker Buildx uses: docker/setup-buildx-action@v3 + - name: Login to DockerHub if: github.event_name != 'pull_request' uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} + - name: Build and push - uses: docker/build-push-action@v5 + uses: docker/build-push-action@v6 with: context: . 
file: ./Dockerfile push: ${{ github.event_name != 'pull_request' }} - # Use tags computed before and also latest if on master + # Use tags computed before tags: | ${{ steps.docker_meta.outputs.tags }} - ${{ github.ref == 'refs/heads/main' && 'inseefrlab/s3-operator:latest' || '' }} labels: ${{ steps.docker_meta.outputs.labels }} platforms: linux/amd64,linux/arm64 - name: Image digest diff --git a/.github/workflows/ci-go.yaml b/.github/workflows/ci-go.yaml new file mode 100644 index 0000000..322420d --- /dev/null +++ b/.github/workflows/ci-go.yaml @@ -0,0 +1,41 @@ +name: Golang CI + +on: + push: + branches: + - "**" + paths-ignore: + - 'docs/**' + tags: + - "*" + pull_request: + paths-ignore: + - 'docs/**' + +jobs: + + go: + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: '1.23.x' + - name: Install dependencies + run: go mod download + - name: Test with Go + run: go test -v ./... -coverprofile cover.out + - name: Upload Go test results + uses: actions/upload-artifact@v4 + with: + name: coverage + path: cover.out + - name: Build + run: go build -v ./... + - name: 'Upload Artifact' + uses: actions/upload-artifact@v4 + with: + name: manager + path: ./bin/manager diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml new file mode 100644 index 0000000..060d775 --- /dev/null +++ b/.github/workflows/release.yaml @@ -0,0 +1,40 @@ +on: + push: + # Sequence of patterns matched against refs/tags + tags: + - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10 + +name: Upload Release Asset + +jobs: + upload-release-assets: + name: Upload Release Asset + needs: build-go + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + - name: Download a single artifact + uses: actions/download-artifact@v4 + with: + name: manager + - name: Create Release + id: create_release + uses: actions/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ github.ref }} + release_name: Release ${{ github.ref }} + draft: false + prerelease: false + - name: Upload Release Asset + id: upload-release-asset + uses: actions/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} # This pulls from the CREATE RELEASE step above, referencing it's ID to get its outputs object, which include a `upload_url`. 
See this blog post for more info: https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps + asset_path: bin/manager + asset_name: s3-operator + asset_content_type: application/x-executable \ No newline at end of file diff --git a/.gitignore b/.gitignore index 8ab387a..ea5ca2a 100644 --- a/.gitignore +++ b/.gitignore @@ -1,27 +1,29 @@ - -# Binaries for programs and plugins -*.exe -*.exe~ -*.dll -*.so -*.dylib -bin -testbin/* -Dockerfile.cross - -# Test binary, build with `go test -c` -*.test - -# Output of the go coverage tool, specifically when used with LiteIDE -*.out - -# Kubernetes Generated files - skip generated files, except for vendored files - -!vendor/**/zz_generated.* - -# editor and IDE paraphernalia -.idea -*.swp -*.swo -*~ -.vscode \ No newline at end of file + +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib +bin +testbin/* +Dockerfile.cross + +# Test binary, build with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out + +# Kubernetes Generated files - skip generated files, except for vendored files + +!vendor/**/zz_generated.* + +# editor and IDE paraphernalia +.idea +*.swp +*.swo +*~ +.vscode + +values-local.yaml \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 31c5110..8cf36d2 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,33 +1,34 @@ -# Build the manager binary -FROM golang:1.22 as builder -ARG TARGETOS -ARG TARGETARCH - -WORKDIR /workspace -# Copy the Go Modules manifests -COPY go.mod go.mod -COPY go.sum go.sum -# cache deps before building and copying source so that we don't need to re-download as much -# and so that source changes don't invalidate our downloaded layer -RUN go mod download - -# Copy the go source -COPY main.go main.go -COPY api/ api/ -COPY controllers/ controllers/ - -# Build -# the GOARCH has not a default value to allow the binary be built according to the host where the command -# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO -# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. Therefore, -# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. -RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager main.go - -# Use distroless as minimal base image to package the manager binary -# Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot -WORKDIR / -COPY --from=builder /workspace/manager . -USER 65532:65532 - -ENTRYPOINT ["/manager"] +# Build the manager binary +FROM golang:1.23 as builder +ARG TARGETOS +ARG TARGETARCH + +WORKDIR /workspace +# Copy the Go Modules manifests +COPY go.mod go.mod +COPY go.sum go.sum +# cache deps before building and copying source so that we don't need to re-download as much +# and so that source changes don't invalidate our downloaded layer +RUN go mod download + +# Copy the go source +COPY cmd/main.go cmd/main.go +COPY api/ api/ +COPY internal/ internal/ + + +# Build +# the GOARCH has not a default value to allow the binary be built according to the host where the command +# was called. For example, if we call make docker-build in a local env which has the Apple Silicon M1 SO +# the docker BUILDPLATFORM arg will be linux/arm64 when for Apple x86 it will be linux/amd64. 
Therefore, +# by leaving it empty we can ensure that the container and binary shipped on it will have the same platform. +RUN CGO_ENABLED=0 GOOS=${TARGETOS:-linux} GOARCH=${TARGETARCH} go build -a -o manager cmd/main.go + +# Use distroless as minimal base image to package the manager binary +# Refer to https://github.com/GoogleContainerTools/distroless for more details +FROM gcr.io/distroless/static:nonroot +WORKDIR / +COPY --from=builder /workspace/manager . +USER 65532:65532 + +ENTRYPOINT ["/manager"] diff --git a/LICENSE b/LICENSE index 72e15a7..df131e1 100644 --- a/LICENSE +++ b/LICENSE @@ -1,21 +1,13 @@ -MIT License - -Copyright (c) 2023 InseeFrLab - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/Makefile b/Makefile index 0808b32..689d46a 100644 --- a/Makefile +++ b/Makefile @@ -48,12 +48,12 @@ endif # Set the Operator SDK version to use. By default, what is installed on the system is used. # This is useful for CI or a project to utilize a specific version of the operator-sdk toolkit. -OPERATOR_SDK_VERSION ?= v1.32.0 +OPERATOR_SDK_VERSION ?= v1.39.1 # Image URL to use all building/pushing image targets IMG ?= controller:latest # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary. -ENVTEST_K8S_VERSION = 1.26.0 +ENVTEST_K8S_VERSION = 1.32.0 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set) ifeq (,$(shell go env GOBIN)) @@ -113,11 +113,11 @@ test: manifests generate fmt vet envtest ## Run tests. .PHONY: build build: manifests generate fmt vet ## Build manager binary. - go build -o bin/manager main.go + go build -o bin/manager cmd/main.go .PHONY: run run: manifests generate fmt vet ## Run a controller from your host. - go run ./main.go + go run ./cmd/main.go # If you wish built the manager image targeting other platforms you can use the --platform flag. # (i.e. docker build --platform linux/arm64 ). However, you must enable docker buildKit for it. 
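As a rough illustration of the multi-platform note above, a cross-build with Docker Buildx would look something like the sketch below — assuming Buildx is available and using the Makefile's default `IMG ?= controller:latest`; the builder name is arbitrary.

```shell
# One-time: create and select a buildx builder so BuildKit is used
docker buildx create --name s3-operator-builder --use

# Cross-build the manager image for the two platforms targeted by the CI workflow.
# Add --push (with a registry-qualified tag) to publish the multi-arch manifest.
docker buildx build --platform linux/amd64,linux/arm64 -t controller:latest .
```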
@@ -184,7 +184,7 @@ ENVTEST ?= $(LOCALBIN)/setup-envtest ## Tool Versions KUSTOMIZE_VERSION ?= v3.8.7 -CONTROLLER_TOOLS_VERSION ?= v0.11.1 +CONTROLLER_TOOLS_VERSION ?= v0.17.1 KUSTOMIZE_INSTALL_SCRIPT ?= "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh" .PHONY: kustomize @@ -279,3 +279,7 @@ catalog-build: opm ## Build a catalog image. .PHONY: catalog-push catalog-push: ## Push a catalog image. $(MAKE) docker-push IMG=$(CATALOG_IMG) + +.PHONY: go-unittest +go-unittest: ## Build the bundle image. + go test -v ./... diff --git a/PROJECT b/PROJECT index 4fefe51..7ed69c8 100644 --- a/PROJECT +++ b/PROJECT @@ -4,7 +4,7 @@ # More info: https://book.kubebuilder.io/reference/project-config.html domain: onyxia.sh layout: -- go.kubebuilder.io/v3 +- go.kubebuilder.io/v4 plugins: manifests.sdk.operatorframework.io/v2: {} scorecard.sdk.operatorframework.io/v2: {} @@ -38,4 +38,22 @@ resources: kind: Path path: github.com/InseeFrLab/s3-operator/api/v1alpha1 version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: onyxia.sh + group: s3.onyxia.sh + kind: S3instance + path: github.com/InseeFrLab/s3-operator/api/v1alpha1 + version: v1alpha1 +- api: + crdVersion: v1 + namespaced: true + controller: true + domain: onyxia.sh + group: s3.onyxia.sh + kind: S3User + path: github.com/InseeFrLab/s3-operator/api/v1alpha1 + version: v1alpha1 version: "3" diff --git a/README.md b/README.md index b49de8e..f13692f 100644 --- a/README.md +++ b/README.md @@ -1,354 +1,401 @@ -# s3-operator - -This Operator SDK based tool aims at managing S3 related resources (buckets, policies, ...) using a Kubernetes-centric approach. You can set `Bucket` or `Policy` custom resources, and let the operator create or update the corresponding bucket/policy on its configured S3 instance. - -## At a glance - -- Current S3 providers : [Minio](https://github.com/InseeFrLab/s3-operator/blob/main/controllers/s3/factory/minioS3Client.go) -- Currently managed S3 resources : [buckets](https://github.com/InseeFrLab/s3-operator/blob/main/api/v1alpha1/bucket_types.go), [policies](https://github.com/InseeFrLab/s3-operator/blob/main/api/v1alpha1/policy_types.go) - -## Compatibility - -This operator has been successfully tested with : - -- Kubernetes : 1.25, 1.26, 1.27 (v0.7.0), 1.28 (v0.7.0 and v0.8.0) -- MinIO : 2023-05-27T05:56:19Z (up to v0.3.0 included), 2023-11-20T22-40-07Z (from v0.4.0 onwards) - -## Description - -At its heart, the operator revolves around CRDs that match S3 resources : - -- `buckets.s3.onyxia.sh` -- `policies.s3.onyxia.sh` -- `paths.s3.onyxia.sh` -- `users.s3.onyxia.sh` - -The custom resources based on these CRDs are a somewhat simplified projection of the real S3 resources. From the operator's point of view : - -- A `Bucket` CR matches a S3 bucket, and only has a name, a quota (actually two, [see Bucket example in *Usage* section below](#bucket)), and optionally, a set of paths -- A `Policy` CR matches a "canned" policy (not a bucket policy, but a global one, that can be attached to a user), and has a name, and its actual content (IAM JSON) -- A `Path` CR matches a set of paths inside of a policy. This is akin to the `paths` property of the `Bucket` CRD, except `Path` is not responsible for Bucket creation. -- A `S3User` CR matches a user in the s3 server, and has a name, a set of policy and a set of group. - -Each custom resource based on these CRDs on Kubernetes is to be matched with a resource on the S3 instance. 
If the CR and the corresponding S3 resource diverge, the operator will create or update the S3 resource to bring it back to. - -Two important caveats : - -- It is one-way - if something happens on the S3 side directly (instead of going through the CRs), the operator has no way of reacting. At best, the next trigger will overwrite the S3 state with the declared state in the k8s custom resource. -- Originally, the operator did not manage resource deletion. This has changed in release v0.8.0 (see #40), but it still isn't a focus, and the implementation is simple. For instance, bucket deletion will simply fail if bucket is not empty - no logic was added to opt-in a "forced" deletion of everything inside the bucket. - -## Installation - -The S3 operator is provided either in source form through this repository, or already built as a Docker image available on [Docker Hub](https://hub.docker.com/r/inseefrlab/s3-operator). - -### Helm - -With this Docker image, the recommended way of installing S3 Operator on your cluster is to use the Helm chart provided in the dedicated repository : https://github.com/InseeFrLab/helm-charts/tree/master/charts/s3-operator. Among other things, the chart takes care of managing a (Kubernetes) ServiceAccount for the operator to run as. The most basic way of using this chart would be : - -```shell -helm repo add inseefrlab https://inseefrlab.github.io/helm-charts # or [helm repo update] if already available -helm install s3-operator --values # see below for the parameters -``` - -### Running from source - -Alternatively, if you just wish to try out the operator without actually installing it, it is also possible to just clone this repository, and run the operator locally - outside of the Kubernetes cluster. This requires Go 1.19+, and prior installation of the CRDs located in `config/crd/bases`, typically with `kubectl`. After which, you can simply run : - -```shell -git clone https://github.com/InseeFrLab/s3-operator.git # or use a tag/release -cd s3-operator -go run main.go --s3-endpoint-url *** --s3-access-key *** --s3-secret-key *** # see below for the parameters -``` - -To quote the Operator SDK README (also visible below), running the operator this way *will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows).* RBAC-wise, you need to be able to freely manipulate the custom resources associated to the operator (`Bucket`, `Policy` and `Path`) in every namespace - [see also the generated ClusterRole manifest](https://github.com/InseeFrLab/s3-operator/blob/main/config/rbac/role.yaml). - -### Kustomize - -Finally, as this operator was generated through Operator SDK, it should be possible to use kustomize to bootstrap the operator as well. Though this method is untested by the maintainers of the project, the Operator SDK generated guidelines ([see below](#operator-sdk-generated-guidelines)) might help in making use of the various kustomize configuration files, possibly through the use of `make`. - -## Configuration - -The operator exposes a few parameters, meant to be set as arguments, though it's possible to use environment variables for some of them. When an environment variable is available, it takes precedence over the flag. 
- -The parameters are summarized in the table below : - -| Flag name | Default | Environment variable | Multiple values allowed | Description | -| ------------------------------- | ---------------- | -------------------- | ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | -| `health-probe-bind-address` | `:8081` | - | no | The address the probe endpoint binds to. Comes from Operator SDK. | -| `leader-elect` | `false` | - | no | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. Comes from Operator SDK. | -| `metrics-bind-address` | `:8080` | - | no | The address the metric endpoint binds to. Comes from Operator SDK. | -| `region` | `us-east-1` | - | no | The region to configure for the S3 client. | -| `s3-access-key` | - | `S3_ACCESS_KEY` | no | The access key used to interact with the S3 server. | -| `s3-ca-certificate-base64` | - | - | yes | (Optional) Base64 encoded, PEM format CA certificate, for https requests to the S3 server. | -| `s3-ca-certificate-bundle-path` | - | - | no | (Optional) Path to a CA certificates bundle file, for https requests to the S3 server. | -| `s3-endpoint-url` | `localhost:9000` | - | no | Hostname (or hostname:port) of the S3 server. | -| `s3-provider` | `minio` | - | no | S3 provider (possible values : `minio`, `mockedS3Provider`) | -| `s3-secret-key` | - | `S3_SECRET_KEY` | no | The secret key used to interact with the S3 server. | -| `useSsl` | true | - | no | Use of SSL/TLS to connect to the S3 server | -| `bucket-deletion` | false | - | no | Trigger bucket deletion on the S3 backend upon CR deletion. Will fail if bucket is not empty. | -| `policy-deletion` | false | - | no | Trigger policy deletion on the S3 backend upon CR deletion | -| `path-deletion` | false | - | no | Trigger path deletion on the S3 backend upon CR deletion. Limited to deleting the `.keep` files used by the operator. | -| `s3User-deletion` | false | - | no | Trigger S3User deletion on the S3 backend upon CR deletion. | -| `override-existing-secret` | false | - | no | Update secret linked to s3User if already exist, else noop | - -## Minimal rights needed to work - -The Operator need at least this rights: - -```json -{ - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:CreateBucket", - "s3:GetObject", - "s3:ListAllMyBuckets", - "s3:ListBucket", - "s3:PutObject" - ], - "Resource": [ - "arn:aws:s3:::*" - ] - }, - { - "Effect": "Allow", - "Action": [ - "admin:CreatePolicy", - "admin:GetBucketQuota", - "admin:GetPolicy", - "admin:ListPolicy", - "admin:SetBucketQuota", - "admin:CreateUser", - "admin:ListUsers", - "admin:DeleteUser", - "admin:GetUser", - "admin:AddUserToGroup", - "admin:RemoveUserFromGroup", - "admin:AttachUserOrGroupPolicy", - "admin:ListUserPolicies" - - ], - "Resource": [ - "arn:aws:s3:::*" - ] - } - ] -} - -``` - -## Usage - -- The first step is to install the CRDs in your Kubernetes cluster. The Helm chart will do just that, but it is also possible to do it manually - the manifests are in the [`config/crd/bases`](https://github.com/InseeFrLab/s3-operator/tree/main/config/crd/bases) folder. -- With the CRDs available and the operator running, all that's left is to create some custom resources - you'll find some commented examples in the subsections below. 
-- As soon as a custom resource is created, the operator will react, and create or update a S3 resource accordingly. -- The same will happen if you modify a CR - the operator will adjust the S3 bucket or policy accordingly - with the notable exception that it will not delete paths for buckets. -- Upon deleting a CR, the corresponding bucket or policy will be left as is, as mentioned in the [*Description* section above](#description) - -### Bucket example - -```yaml -apiVersion: s3.onyxia.sh/v1alpha1 -kind: Bucket -metadata: - labels: - app.kubernetes.io/name: bucket - app.kubernetes.io/instance: bucket-sample - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: s3-operator - name: bucket-sample -spec: - # Bucket name (on S3 server, as opposed to the name of the CR) - name: dummy-bucket - - # Paths to create on the bucket - # As it is not possible to create empty paths on a S3 server, (limitation of either S3, - # or at least Minio, the only currently implemented provider), this will actually create - # a .keep file at the deepest folder in the path. - paths: - - a_path - - another/deeper/path - - # Quota to set on the bucket, in bytes (so 1000000000 would be 1GB). - # This is split over two different parameters, although there is only one actual quota - # - "default" is required, and is used as the baseline - # - "override" is optional, and as the name implies, takes precedence over "default" - # Though clumsy, this pattern (for lack of a better word) allows to easily change the - # default quota for every buckets without impacting the ones that might have received - # a manual change. If this is not useful to you, you can safely skip using "override". - quota: - default: 10000000 - # override: 20000000 - -``` - -### Policy example - -```yaml -apiVersion: s3.onyxia.sh/v1alpha1 -kind: Policy -metadata: - labels: - app.kubernetes.io/name: policy - app.kubernetes.io/instance: policy-sample - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: s3-operator - name: policy-sample -spec: - # Policy name (on S3 server, as opposed to the name of the CR) - name: dummy-policy - - # Content of the policy, as a multiline string - # This should be IAM compliant JSON - follow the guidelines of the actual - # S3 provider you're using, as sometimes only a subset is available. 
- policyContent: >- - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "arn:aws:s3:::dummy-bucket", - "arn:aws:s3:::dummy-bucket/*" - ] - } - ] - } -``` - -### Path example - -```yaml -apiVersion: s3.onyxia.sh/v1alpha1 -kind: Path -metadata: - labels: - app.kubernetes.io/name: path - app.kubernetes.io/instance: path-sample - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: s3-operator - name: path-sample -spec: - # Bucket name (on S3 server, not a Bucket CR's metadata.name) - bucketName: shared-bucket - - # Paths to create on the bucket - paths: - - /home/alice - - /home/bob - - -``` - -### S3User example - -```yaml -apiVersion: s3.onyxia.sh/v1alpha1 -kind: S3User -metadata: - labels: - app.kubernetes.io/name: user - app.kubernetes.io/instance: user-sample - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: s3-operator - name: user-sample -spec: - accessKey: user-sample - policies: - - policy-example1 - - policy-example2 - -``` - -Each S3user is linked to a kubernetes secret which have the same name that the S3User. The secret contains 2 keys: `accessKey` and `secretKey`. - -## Operator SDK generated guidelines - -
- -Click to fold / unfold - -## Getting Started - -You’ll need a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster. -**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows). - -### Running on the cluster -1. Install Instances of Custom Resources: - -```sh -kubectl apply -f config/samples/ -``` - -2. Build and push your image to the location specified by `IMG`: - -```sh -make docker-build docker-push IMG=/s3-operator:tag -``` - -3. Deploy the controller to the cluster with the image specified by `IMG`: - -```sh -make deploy IMG=/s3-operator:tag -``` - -### Uninstall CRDs -To delete the CRDs from the cluster: - -```sh -make uninstall -``` - -### Undeploy controller -UnDeploy the controller to the cluster: - -```sh -make undeploy -``` - -## Contributing - -### How it works -This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) - -It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/) -which provides a reconcile function responsible for synchronizing resources untile the desired state is reached on the cluster - -### Test It Out -1. Install the CRDs into the cluster: - -```sh -make install -``` - -2. Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running): - -```sh -make run -``` - -**NOTE:** You can also run this in one step by running: `make install run` - -### Modifying the API definitions -If you are editing the API definitions, generate the manifests such as CRs or CRDs using: - -```sh -make manifests -``` - -**NOTE:** Run `make --help` for more information on all potential `make` targets - -More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) - -
- - +# s3-operator + +This Operator SDK based tool aims at managing S3 related resources (buckets, policies, ...) using a Kubernetes-centric approach. You can set `Bucket` or `Policy` custom resources, and let the operator create or update the corresponding bucket/policy on its configured S3 instance. + +## At a glance + +- Current S3 providers : [Minio](https://github.com/InseeFrLab/s3-operator/blob/main/internal/s3/factory/minioS3Client.go) +- Currently managed S3 resources : [buckets](https://github.com/InseeFrLab/s3-operator/blob/main/api/v1alpha1/bucket_types.go), [policies](https://github.com/InseeFrLab/s3-operator/blob/main/api/v1alpha1/policy_types.go) + +## Compatibility + +This operator has been successfully tested with : + +- Kubernetes : 1.25, 1.26, 1.27 (v0.7.0), 1.28 (v0.7.0 and v0.8.0) +- MinIO : 2023-05-27T05:56:19Z (up to v0.3.0 included), 2023-11-20T22-40-07Z (from v0.4.0 onwards) + +## Description + +At its heart, the operator revolves around CRDs that match S3 resources : + +- `buckets.s3.onyxia.sh` +- `policies.s3.onyxia.sh` +- `paths.s3.onyxia.sh` +- `s3Users.s3.onyxia.sh` +- `s3Instances.s3.onyxia.sh` + +The custom resources based on these CRDs are a somewhat simplified projection of the real S3 resources. From the operator's point of view : + +- A `Bucket` CR matches a S3 bucket, and only has a name, a quota (actually two, [see Bucket example in *Usage* section below](#bucket)), and optionally, a set of paths +- A `Policy` CR matches a "canned" policy (not a bucket policy, but a global one, that can be attached to a user), and has a name, and its actual content (IAM JSON) +- A `Path` CR matches a set of paths inside of a policy. This is akin to the `paths` property of the `Bucket` CRD, except `Path` is not responsible for Bucket creation. +- A `S3User` CR matches a user in the s3 server, and has a name, a set of policy and a set of group. +- A `S3Instance` CR matches a s3Instance. + +Each custom resource based on these CRDs on Kubernetes is to be matched with a resource on the S3 instance. If the CR and the corresponding S3 resource diverge, the operator will create or update the S3 resource to bring it back to. + +Two important caveats : + +- It is one-way - if something happens on the S3 side directly (instead of going through the CRs), the operator has no way of reacting. At best, the next trigger will overwrite the S3 state with the declared state in the k8s custom resource. +- Originally, the operator did not manage resource deletion. This has changed in release v0.8.0 (see #40), but it still isn't a focus, and the implementation is simple. For instance, bucket deletion will simply fail if bucket is not empty - no logic was added to opt-in a "forced" deletion of everything inside the bucket. + +## Installation + +The S3 operator is provided either in source form through this repository, or already built as a Docker image available on [Docker Hub](https://hub.docker.com/r/inseefrlab/s3-operator). + +### Helm + +With this Docker image, the recommended way of installing S3 Operator on your cluster is to use the Helm chart provided in the dedicated repository : https://github.com/InseeFrLab/helm-charts/tree/master/charts/s3-operator. Among other things, the chart takes care of managing a (Kubernetes) ServiceAccount for the operator to run as. 
The most basic way of using this chart would be : + +```shell +helm repo add inseefrlab https://inseefrlab.github.io/helm-charts # or [helm repo update] if already available +helm install s3-operator --values # see below for the parameters +``` + +### Running from source + +Alternatively, if you just wish to try out the operator without actually installing it, it is also possible to just clone this repository, and run the operator locally - outside of the Kubernetes cluster. This requires Go 1.19+, and prior installation of the CRDs located in `config/crd/bases`, typically with `kubectl`. After which, you can simply run : + +```shell +git clone https://github.com/InseeFrLab/s3-operator.git # or use a tag/release +cd s3-operator +go run main.go --s3-endpoint-url *** --s3-access-key *** --s3-secret-key *** # see below for the parameters +``` + +To quote the Operator SDK README (also visible below), running the operator this way *will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows).* RBAC-wise, you need to be able to freely manipulate the custom resources associated to the operator (`Bucket`, `Policy` and `Path`) in every namespace - [see also the generated ClusterRole manifest](https://github.com/InseeFrLab/s3-operator/blob/main/config/rbac/role.yaml). + +### Kustomize + +Finally, as this operator was generated through Operator SDK, it should be possible to use kustomize to bootstrap the operator as well. Though this method is untested by the maintainers of the project, the Operator SDK generated guidelines ([see below](#operator-sdk-generated-guidelines)) might help in making use of the various kustomize configuration files, possibly through the use of `make`. + +## Configuration + +The operator exposes a few parameters, meant to be set as arguments, though it's possible to use environment variables for some of them. When an environment variable is available, it takes precedence over the flag. + +The parameters are summarized in the table below : + +| Flag name | Default | Environment variable | Multiple values allowed | Description | +| --------------------------- | ------- | -------------------- | ----------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | +| `health-probe-bind-address` | `:8081` | - | no | The address the probe endpoint binds to. Comes from Operator SDK. | +| `leader-elect` | `false` | - | no | Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager. Comes from Operator SDK. | +| `metrics-bind-address` | `:8080` | - | no | The address the metric endpoint binds to. Comes from Operator SDK. 
| | +| `override-existing-secret` | false | - | no | Update secret linked to s3User if already exist, else noop | +## Minimal rights needed to work + +The Operator need at least this rights: + +```json +{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:CreateBucket", + "s3:GetObject", + "s3:ListAllMyBuckets", + "s3:ListBucket", + "s3:PutObject" + ], + "Resource": [ + "arn:aws:s3:::*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "admin:CreatePolicy", + "admin:GetBucketQuota", + "admin:GetPolicy", + "admin:ListPolicy", + "admin:SetBucketQuota", + "admin:CreateUser", + "admin:ListUsers", + "admin:DeleteUser", + "admin:GetUser", + "admin:AddUserToGroup", + "admin:RemoveUserFromGroup", + "admin:AttachUserOrGroupPolicy", + "admin:ListUserPolicies" + + ], + "Resource": [ + "arn:aws:s3:::*" + ] + } + ] +} + +``` + +## Usage + +- The first step is to install the CRDs in your Kubernetes cluster. The Helm chart will do just that, but it is also possible to do it manually - the manifests are in the [`config/crd/bases`](https://github.com/InseeFrLab/s3-operator/tree/main/config/crd/bases) folder. +- With the CRDs available and the operator running, all that's left is to create some custom resources - you'll find some commented examples in the subsections below. +- As soon as a custom resource is created, the operator will react, and create or update a S3 resource accordingly. +- The same will happen if you modify a CR - the operator will adjust the S3 bucket or policy accordingly - with the notable exception that it will not delete paths for buckets. +- Upon deleting a CR, the corresponding bucket or policy will be left as is, as mentioned in the [*Description* section above](#description) + +An instance of S3Operator can manage multiple S3. On each resource created you can set where to create it. To add multiple instance of S3 see S3Instance example. On each object deployed you can attach it to an existing s3Instance. If no instance is set on the resource, S3Operator will failback to default instance configured by env var. + +### S3Instance example + +```yaml +apiVersion: s3.onyxia.sh/v1alpha1 +kind: S3Instance +metadata: + labels: + app.kubernetes.io/name: bucket + app.kubernetes.io/instance: bucket-sample + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: s3-operator + name: s3-default-instance # Name of the S3Instance +spec: + s3Provider: minio # Type of the Provider. Can be "mockedS3Provider" or "minio" + url: https://minio.example.com # URL of the Provider + secretRef: minio-credentials # Name of the secret containing 2 Keys S3_ACCESS_KEY and S3_SECRET_KEY + caCertSecretRef: minio-certs # Name of the secret containing key ca.crt with cert of s3provider + region: us-east-1 # Region of the Provider + allowedNamespaces: [] # namespaces allowed to have buckets, policies, ... Wildcard prefix/suffix allowed. 
If empty only the same namespace as s3instance is allowed + bucketDeletionEnabled: true # Allowed bucket entity suppression on s3instance + policyDeletionEnabled: true # Allowed policy entity suppression on s3instance + pathDeletionEnabled: true # Allowed path entity suppression on s3instance + s3UserDeletionEnabled: true # Allowed s3User entity suppression on s3instance +``` + +### Bucket example + +```yaml +apiVersion: s3.onyxia.sh/v1alpha1 +kind: Bucket +metadata: + labels: + app.kubernetes.io/name: bucket + app.kubernetes.io/instance: bucket-sample + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: s3-operator + name: bucket-sample +spec: + # Bucket name (on S3 server, as opposed to the name of the CR) + name: dummy-bucket + + # Paths to create on the bucket + # As it is not possible to create empty paths on a S3 server, (limitation of either S3, + # or at least Minio, the only currently implemented provider), this will actually create + # a .keep file at the deepest folder in the path. + paths: + - a_path + - another/deeper/path + + # Quota to set on the bucket, in bytes (so 1000000000 would be 1GB). + # This is split over two different parameters, although there is only one actual quota + # - "default" is required, and is used as the baseline + # - "override" is optional, and as the name implies, takes precedence over "default" + # Though clumsy, this pattern (for lack of a better word) allows to easily change the + # default quota for every buckets without impacting the ones that might have received + # a manual change. If this is not useful to you, you can safely skip using "override". + quota: + default: 10000000 + # override: 20000000 + + # Optionnal, let empty if you have configured the default s3 else use an existing s3Instance + s3InstanceRef: "s3-default-instance" + + +``` + +### Policy example + +```yaml +apiVersion: s3.onyxia.sh/v1alpha1 +kind: Policy +metadata: + labels: + app.kubernetes.io/name: policy + app.kubernetes.io/instance: policy-sample + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: s3-operator + name: policy-sample +spec: + # Policy name (on S3 server, as opposed to the name of the CR) + name: dummy-policy + + # Optionnal, let empty if you have configured the default s3 else use an existing s3Instance + s3InstanceRef: "s3-default-instance" + + # Content of the policy, as a multiline string + # This should be IAM compliant JSON - follow the guidelines of the actual + # S3 provider you're using, as sometimes only a subset is available. + The first Statement (Allow ListBucket) should be applied to every user, + # as s3-operator uses this call to verify that credentials are valid when + # reconciling an existing user. 
+ policyContent: >- + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:ListBucket" + ], + "Resource": [ + "arn:aws:s3:::*" + ] + }, + { + "Effect": "Allow", + "Action": [ + "s3:*" + ], + "Resource": [ + "arn:aws:s3:::dummy-bucket", + "arn:aws:s3:::dummy-bucket/*" + ] + } + ] + } +``` + +### Path example + +```yaml +apiVersion: s3.onyxia.sh/v1alpha1 +kind: Path +metadata: + labels: + app.kubernetes.io/name: path + app.kubernetes.io/instance: path-sample + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: s3-operator + name: path-sample +spec: + # Bucket name (on S3 server, not a Bucket CR's metadata.name) + bucketName: shared-bucket + + # Paths to create on the bucket + paths: + - /home/alice + - /home/bob + + # Optionnal, let empty if you have configured the default s3 else use an existing s3Instance + s3InstanceRef: "s3-default-instance" + +``` + +### S3User example + +```yaml +apiVersion: s3.onyxia.sh/v1alpha1 +kind: S3User +metadata: + labels: + app.kubernetes.io/name: user + app.kubernetes.io/instance: user-sample + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + app.kubernetes.io/created-by: s3-operator + name: user-sample +spec: + accessKey: user-sample + policies: + - policy-example1 + - policy-example2 + # Optionnal, let empty if you have configured the default s3 else use an existing s3Instance + s3InstanceRef: "s3-default-instance" + +``` + +Each S3user is linked to a kubernetes secret which have the same name that the S3User. The secret contains 2 keys: `accessKey` and `secretKey`. + +### :info: How works s3InstanceRef + +S3InstanceRef can get the following values: +- empty: In this case the s3instance use will be the default one configured at startup if the namespace is in the namespace allowed for this s3Instance +- `s3InstanceName`: In this case the s3Instance use will be the s3Instance with the name `s3InstanceName` in the current namespace (if the current namespace is allowed) +- `namespace/s3InstanceName`: In this case the s3Instance use will be the s3Instance with the name `s3InstanceName` in the namespace `namespace` (if the current namespace is allowed to use this s3Instance) + +## Operator SDK generated guidelines + +
+ +Click to fold / unfold + +## Getting Started + +You’ll need a Kubernetes cluster to run against. You can use [KIND](https://sigs.k8s.io/kind) to get a local cluster for testing, or run against a remote cluster. +**Note:** Your controller will automatically use the current context in your kubeconfig file (i.e. whatever cluster `kubectl cluster-info` shows). + +### Running on the cluster +1. Install Instances of Custom Resources: + +```sh +kubectl apply -f config/samples/ +``` + +2. Build and push your image to the location specified by `IMG`: + +```sh +make docker-build docker-push IMG=/s3-operator:tag +``` + +3. Deploy the controller to the cluster with the image specified by `IMG`: + +```sh +make deploy IMG=/s3-operator:tag +``` + +### Uninstall CRDs +To delete the CRDs from the cluster: + +```sh +make uninstall +``` + +### Undeploy controller +UnDeploy the controller to the cluster: + +```sh +make undeploy +``` + +## Contributing + +### How it works +This project aims to follow the Kubernetes [Operator pattern](https://kubernetes.io/docs/concepts/extend-kubernetes/operator/) + +It uses [Controllers](https://kubernetes.io/docs/concepts/architecture/controller/) +which provides a reconcile function responsible for synchronizing resources untile the desired state is reached on the cluster + +### Test It Out +1. Install the CRDs into the cluster: + +```sh +make install +``` + +2. Run your controller (this will run in the foreground, so switch to a new terminal if you want to leave it running): + +```sh +make run +``` + +**NOTE:** You can also run this in one step by running: `make install run` + +### Modifying the API definitions +If you are editing the API definitions, generate the manifests such as CRs or CRDs using: + +```sh +make manifests +``` + +**NOTE:** Run `make --help` for more information on all potential `make` targets + +More information can be found via the [Kubebuilder Documentation](https://book.kubebuilder.io/introduction.html) + +
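As a quick illustration of the S3User/Secret relationship described in the README above — a minimal sketch, assuming the `user-sample` resource from the example and the default secret field names (`accessKey`/`secretKey`):

```shell
# The operator creates a Secret named after the S3User (here: user-sample)
# in the same namespace; its two keys hold the generated credentials.
kubectl get secret user-sample -o jsonpath='{.data.accessKey}' | base64 -d; echo
kubectl get secret user-sample -o jsonpath='{.data.secretKey}' | base64 -d; echo
```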
+ + diff --git a/api/v1alpha1/bucket_types.go b/api/v1alpha1/bucket_types.go index 092aac6..578372f 100644 --- a/api/v1alpha1/bucket_types.go +++ b/api/v1alpha1/bucket_types.go @@ -36,6 +36,14 @@ type BucketSpec struct { // +kubebuilder:validation:Optional Paths []string `json:"paths,omitempty"` + // s3InstanceRef where create the bucket + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=127 + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="s3InstanceRef is immutable" + // +kubebuilder:default=s3-operator/default + S3InstanceRef string `json:"s3InstanceRef"` + // Quota to apply to the bucket // +kubebuilder:validation:Required Quota Quota `json:"quota"` @@ -43,7 +51,7 @@ type BucketSpec struct { // BucketStatus defines the observed state of Bucket type BucketStatus struct { - // Status management using Conditions. + // Status management using Conditions. // See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } @@ -64,15 +72,11 @@ type Bucket struct { // BucketList contains a list of Bucket type BucketList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` + metav1.TypeMeta ` json:",inline"` + metav1.ListMeta ` json:"metadata,omitempty"` Items []Bucket `json:"items"` } -func init() { - SchemeBuilder.Register(&Bucket{}, &BucketList{}) -} - type Quota struct { // Default quota to apply, mandatory // +kubebuilder:validation:Required @@ -82,3 +86,7 @@ type Quota struct { // +kubebuilder:validation:Optional Override int64 `json:"override,omitempty"` } + +func init() { + SchemeBuilder.Register(&Bucket{}, &BucketList{}) +} diff --git a/api/v1alpha1/path_types.go b/api/v1alpha1/path_types.go index 58f5aad..1f69c86 100644 --- a/api/v1alpha1/path_types.go +++ b/api/v1alpha1/path_types.go @@ -35,6 +35,14 @@ type PathSpec struct { // Paths (folders) to create inside the bucket // +kubebuilder:validation:Optional Paths []string `json:"paths,omitempty"` + + // s3InstanceRef where create the Paths + // +kubebuilder:default=s3-operator/default + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="s3InstanceRef is immutable" + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=127 + S3InstanceRef string `json:"s3InstanceRef,omitempty"` } // PathStatus defines the observed state of Path @@ -60,8 +68,8 @@ type Path struct { // PathList contains a list of Path type PathList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` + metav1.TypeMeta ` json:",inline"` + metav1.ListMeta ` json:"metadata,omitempty"` Items []Path `json:"items"` } diff --git a/api/v1alpha1/policy_types.go b/api/v1alpha1/policy_types.go index 6862f5e..17078af 100644 --- a/api/v1alpha1/policy_types.go +++ b/api/v1alpha1/policy_types.go @@ -35,11 +35,19 @@ type PolicySpec struct { // +kubebuilder:validation:Required // Content of the policy (IAM JSON format) PolicyContent string `json:"policyContent"` + + // s3InstanceRef where create the Policy + // +kubebuilder:default=s3-operator/default + // +kubebuilder:validation:XValidation:rule="self == 
oldSelf",message="s3InstanceRef is immutable" + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=127 + S3InstanceRef string `json:"s3InstanceRef,omitempty"` } // PolicyStatus defines the observed state of Policy type PolicyStatus struct { - // Status management using Conditions. + // Status management using Conditions. // See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` } @@ -60,8 +68,8 @@ type Policy struct { // PolicyList contains a list of Policy type PolicyList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` + metav1.TypeMeta ` json:",inline"` + metav1.ListMeta ` json:"metadata,omitempty"` Items []Policy `json:"items"` } diff --git a/api/v1alpha1/s3instance_types.go b/api/v1alpha1/s3instance_types.go new file mode 100644 index 0000000..fa252d7 --- /dev/null +++ b/api/v1alpha1/s3instance_types.go @@ -0,0 +1,103 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN! +// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized. + +// S3InstanceSpec defines the desired state of S3Instance +type S3InstanceSpec struct { + + // type of the S3Instance + // +kubebuilder:validation:Required + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="S3Provider is immutable" + // +kubebuilder:default=minio + // +kubebuilder:validation:Enum=minio;mockedS3Provider + S3Provider string `json:"s3Provider,omitempty"` + + // url of the S3Instance + // +kubebuilder:validation:Required + Url string `json:"url"` + + // Ref to Secret associated to the S3Instance containing accessKey and secretKey + // +kubebuilder:validation:Required + SecretRef string `json:"secretRef"` + + // region associated to the S3Instance + // +kubebuilder:validation:Optional + Region string `json:"region,omitempty"` + + // Secret containing key ca.crt with the certificate associated to the S3InstanceUrl + // +kubebuilder:validation:Optional + CaCertSecretRef string `json:"caCertSecretRef,omitempty"` + + // AllowedNamespaces to use this S3InstanceUrl if empty only the namespace of this instance url is allowed to use it + // +kubebuilder:validation:Optional + AllowedNamespaces []string `json:"allowedNamespaces,omitempty"` + + // BucketDeletionEnabled Trigger bucket deletion on the S3 backend upon CR deletion. Will fail if bucket is not empty. + // +kubebuilder:default=false + BucketDeletionEnabled bool `json:"bucketDeletionEnabled,omitempty"` + + // PolicyDeletionEnabled Trigger policy deletion on the S3 backend upon CR deletion. 
+ // +kubebuilder:default=false + PolicyDeletionEnabled bool `json:"policyDeletionEnabled,omitempty"` + + // PathDeletionEnabled Trigger path deletion on the S3 backend upon CR deletion. Limited to deleting the `.keep` files used by the operator. + // +kubebuilder:default=false + PathDeletionEnabled bool `json:"pathDeletionEnabled,omitempty"` + + // S3UserDeletionEnabled Trigger S3 deletion on the S3 backend upon CR deletion. + // +kubebuilder:default=false + S3UserDeletionEnabled bool `json:"s3UserDeletionEnabled,omitempty"` +} + +// S3InstanceStatus defines the observed state of S3Instance +type S3InstanceStatus struct { + // Status management using Conditions. + // See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties + Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` +} + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status + +// S3Instance is the Schema for the S3Instances API +type S3Instance struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec S3InstanceSpec `json:"spec,omitempty"` + Status S3InstanceStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// S3InstanceList contains a list of S3Instance +type S3InstanceList struct { + metav1.TypeMeta ` json:",inline"` + metav1.ListMeta ` json:"metadata,omitempty"` + Items []S3Instance `json:"items"` +} + +func init() { + SchemeBuilder.Register(&S3Instance{}, &S3InstanceList{}) +} diff --git a/api/v1alpha1/s3user_types.go b/api/v1alpha1/s3user_types.go index ac40da6..bb361c6 100644 --- a/api/v1alpha1/s3user_types.go +++ b/api/v1alpha1/s3user_types.go @@ -37,6 +37,28 @@ type S3UserSpec struct { // SecretName associated to the S3User // +kubebuilder:validation:Optional SecretName string `json:"secretName"` + + // s3InstanceRef where create the user + // +kubebuilder:default=s3-operator/default + // +kubebuilder:validation:XValidation:rule="self == oldSelf",message="s3InstanceRef is immutable" + // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$` + // +kubebuilder:validation:MinLength=1 + // +kubebuilder:validation:MaxLength=127 + S3InstanceRef string `json:"s3InstanceRef,omitempty"` + + // SecretFieldNameAccessKey associated to the S3User + // Allow overridden the default key to store the accessKey value in the secret + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Type="string" + // +kubebuilder:default="accessKey" + SecretFieldNameAccessKey string `json:"secretFieldNameAccessKey,omitempty"` + + // SecretFieldNameSecretKey associated to the S3User + // Allow overridden the default key to store the secretKey value in the secret + // +kubebuilder:validation:Optional + // +kubebuilder:validation:Type="string" + // +kubebuilder:default="secretKey" + SecretFieldNameSecretKey string `json:"secretFieldNameSecretKey,omitempty"` } // S3UserStatus defines the observed state of S3User @@ -62,8 +84,8 @@ type S3User struct { // S3UserList contains a list of S3User type S3UserList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty"` + metav1.TypeMeta ` json:",inline"` + metav1.ListMeta ` json:"metadata,omitempty"` Items []S3User `json:"items"` } diff --git a/api/v1alpha1/types.go b/api/v1alpha1/types.go new file mode 100644 index 0000000..8a44415 --- /dev/null +++ b/api/v1alpha1/types.go @@ -0,0 +1,17 @@ +package 
v1alpha1 + +// Definitions to manage status condition types +const ( + // ConditionReconciled represents the status of the resource reconciliation + ConditionReconciled = "Reconciled" +) + +// Definitions to manage status condition reasons +const ( + Reconciling = "Reconciling" + Unreachable = "Unreachable" + CreationFailure = "CreationFailure" + Reconciled = "Reconciled" + DeletionFailure = "DeletionFailure" + DeletionBlocked = "DeletionBlocked" +) diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 6761cdf..f191f44 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1,5 +1,4 @@ //go:build !ignore_autogenerated -// +build !ignore_autogenerated /* Copyright 2023. @@ -340,6 +339,107 @@ func (in *Quota) DeepCopy() *Quota { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3Instance) DeepCopyInto(out *S3Instance) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3Instance. +func (in *S3Instance) DeepCopy() *S3Instance { + if in == nil { + return nil + } + out := new(S3Instance) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *S3Instance) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InstanceList) DeepCopyInto(out *S3InstanceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]S3Instance, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceList. +func (in *S3InstanceList) DeepCopy() *S3InstanceList { + if in == nil { + return nil + } + out := new(S3InstanceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *S3InstanceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *S3InstanceSpec) DeepCopyInto(out *S3InstanceSpec) { + *out = *in + if in.AllowedNamespaces != nil { + in, out := &in.AllowedNamespaces, &out.AllowedNamespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceSpec. +func (in *S3InstanceSpec) DeepCopy() *S3InstanceSpec { + if in == nil { + return nil + } + out := new(S3InstanceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *S3InstanceStatus) DeepCopyInto(out *S3InstanceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new S3InstanceStatus. +func (in *S3InstanceStatus) DeepCopy() *S3InstanceStatus { + if in == nil { + return nil + } + out := new(S3InstanceStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *S3User) DeepCopyInto(out *S3User) { *out = *in @@ -439,4 +539,4 @@ func (in *S3UserStatus) DeepCopy() *S3UserStatus { out := new(S3UserStatus) in.DeepCopyInto(out) return out -} \ No newline at end of file +} diff --git a/main.go b/cmd/main.go similarity index 54% rename from main.go rename to cmd/main.go index 9415abf..37c86fb 100644 --- a/main.go +++ b/cmd/main.go @@ -20,14 +20,21 @@ import ( "flag" "fmt" "os" + "time" // Import all Kubernetes client auth plugins (e.g. Azure, GCP, OIDC, etc.) // to ensure that exec-entrypoint and run can make use of them. _ "k8s.io/client-go/plugin/pkg/client/auth" s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - controllers "github.com/InseeFrLab/s3-operator/controllers" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" + bucketControllers "github.com/InseeFrLab/s3-operator/internal/controller/bucket" + pathControllers "github.com/InseeFrLab/s3-operator/internal/controller/path" + policyControllers "github.com/InseeFrLab/s3-operator/internal/controller/policy" + s3InstanceControllers "github.com/InseeFrLab/s3-operator/internal/controller/s3instance" + userControllers "github.com/InseeFrLab/s3-operator/internal/controller/user" + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory/impl" + "go.uber.org/zap/zapcore" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -59,7 +66,6 @@ func (flags *ArrayFlags) Set(value string) error { func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - utilruntime.Must(s3v1alpha1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme } @@ -70,47 +76,42 @@ func main() { var probeAddr string // S3 related variables - var s3EndpointUrl string - var accessKey string - var secretKey string - var region string - var s3Provider string - var useSsl bool - var caCertificatesBase64 ArrayFlags - var caCertificatesBundlePath string - var bucketDeletion bool - var policyDeletion bool - var pathDeletion bool - var s3userDeletion bool + var reconcilePeriod time.Duration //K8S related variable var overrideExistingSecret bool - flag.StringVar(&metricsAddr, "metrics-bind-address", ":8080", "The address the metric endpoint binds to.") - flag.StringVar(&probeAddr, "health-probe-bind-address", ":8081", "The address the probe endpoint binds to.") + flag.StringVar( + &metricsAddr, + "metrics-bind-address", + ":8080", + "The address the metric endpoint binds to.", + ) + flag.StringVar( + &probeAddr, + "health-probe-bind-address", + ":8081", + "The address the probe endpoint binds to.", + ) flag.BoolVar(&enableLeaderElection, "leader-elect", false, "Enable leader election for controller manager. 
"+ "Enabling this will ensure there is only one active controller manager.") + flag.DurationVar(&reconcilePeriod, "reconcile-period", 0, + "Default reconcile period for controllers. Zero to disable periodic reconciliation") // S3 related flags - flag.StringVar(&s3Provider, "s3-provider", "minio", "S3 provider (possible values : minio, mockedS3Provider)") - flag.StringVar(&s3EndpointUrl, "s3-endpoint-url", "localhost:9000", "Hostname (or hostname:port) of the S3 server") - flag.StringVar(&accessKey, "s3-access-key", "ROOTNAME", "The accessKey of the acount") - flag.StringVar(&secretKey, "s3-secret-key", "CHANGEME123", "The secretKey of the acount") - flag.Var(&caCertificatesBase64, "s3-ca-certificate-base64", "(Optional) Base64 encoded, PEM format certificate file for a certificate authority, for https requests to S3") - flag.StringVar(&caCertificatesBundlePath, "s3-ca-certificate-bundle-path", "", "(Optional) Path to a CA certificate file, for https requests to S3") - flag.StringVar(®ion, "region", "us-east-1", "The region to configure for the S3 client") - flag.BoolVar(&useSsl, "useSsl", true, "Use of SSL/TLS to connect to the S3 endpoint") - flag.BoolVar(&bucketDeletion, "bucket-deletion", false, "Trigger bucket deletion on the S3 backend upon CR deletion. Will fail if bucket is not empty.") - flag.BoolVar(&policyDeletion, "policy-deletion", false, "Trigger policy deletion on the S3 backend upon CR deletion") - flag.BoolVar(&pathDeletion, "path-deletion", false, "Trigger path deletion on the S3 backend upon CR deletion. Limited to deleting the `.keep` files used by the operator.") - flag.BoolVar(&s3userDeletion, "s3user-deletion", false, "Trigger S3 deletion on the S3 backend upon CR deletion") - flag.BoolVar(&overrideExistingSecret, "override-existing-secret", false, "Override existing secret associated to user in case of the secret already exist") + flag.BoolVar( + &overrideExistingSecret, + "override-existing-secret", + false, + "Override existing secret associated to user in case of the secret already exist", + ) opts := zap.Options{ Development: true, TimeEncoder: zapcore.ISO8601TimeEncoder, } + opts.BindFlags(flag.CommandLine) flag.Parse() @@ -121,6 +122,10 @@ func main() { BindAddress: metricsAddr, } + s3Factory := s3factory.NewS3Factory() + s3InstanceHelper := helpers.NewS3InstanceHelper() + controllerHelper := helpers.NewControllerHelper() + mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{ Scheme: scheme, Metrics: serverOption, @@ -150,65 +155,64 @@ func main() { os.Exit(1) } - // For S3 access key and secret key, we first try to read the values from environment variables. - // Only if these are not defined do we use the respective flags. 
- var accessKeyFromEnvIfAvailable = os.Getenv("S3_ACCESS_KEY") - if accessKeyFromEnvIfAvailable == "" { - accessKeyFromEnvIfAvailable = accessKey - } - var secretKeyFromEnvIfAvailable = os.Getenv("S3_SECRET_KEY") - if secretKeyFromEnvIfAvailable == "" { - secretKeyFromEnvIfAvailable = secretKey - } - - // Creation of the S3 client - s3Config := &factory.S3Config{S3Provider: s3Provider, S3UrlEndpoint: s3EndpointUrl, Region: region, AccessKey: accessKeyFromEnvIfAvailable, SecretKey: secretKeyFromEnvIfAvailable, UseSsl: useSsl, CaCertificatesBase64: caCertificatesBase64, CaBundlePath: caCertificatesBundlePath} - s3Client, err := factory.GetS3Client(s3Config.S3Provider, s3Config) - if err != nil { - // setupLog.Log.Error(err, err.Error()) - // fmt.Print(s3Client) - // fmt.Print(err) - setupLog.Error(err, "an error occurred while creating the S3 client", "s3Client", s3Client) + if err = (&s3InstanceControllers.S3InstanceReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ReconcilePeriod: reconcilePeriod, + S3factory: s3Factory, + ControllerHelper: controllerHelper, + S3Instancehelper: s3InstanceHelper, + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "S3Instance") os.Exit(1) } - - if err = (&controllers.BucketReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - S3Client: s3Client, - BucketDeletion: bucketDeletion, + if err = (&bucketControllers.BucketReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ReconcilePeriod: reconcilePeriod, + S3factory: s3Factory, + ControllerHelper: controllerHelper, + S3Instancehelper: s3InstanceHelper, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Bucket") os.Exit(1) } - if err = (&controllers.PathReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - S3Client: s3Client, - PathDeletion: pathDeletion, + if err = (&pathControllers.PathReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ReconcilePeriod: reconcilePeriod, + S3factory: s3Factory, + ControllerHelper: controllerHelper, + S3Instancehelper: s3InstanceHelper, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Path") os.Exit(1) } - if err = (&controllers.PolicyReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - S3Client: s3Client, - PolicyDeletion: policyDeletion, + if err = (&policyControllers.PolicyReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + ReconcilePeriod: reconcilePeriod, + S3factory: s3Factory, + ControllerHelper: controllerHelper, + S3Instancehelper: s3InstanceHelper, }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "Policy") os.Exit(1) } - if err = (&controllers.S3UserReconciler{ - Client: mgr.GetClient(), - Scheme: mgr.GetScheme(), - S3Client: s3Client, - S3UserDeletion: s3userDeletion, - OverrideExistingSecret: overrideExistingSecret, + if err = (&userControllers.S3UserReconciler{ + Client: mgr.GetClient(), + Scheme: mgr.GetScheme(), + OverrideExistingSecret: overrideExistingSecret, + ReconcilePeriod: reconcilePeriod, + S3factory: s3Factory, + ControllerHelper: controllerHelper, + S3Instancehelper: s3InstanceHelper, + PasswordGeneratorHelper: helpers.NewPasswordGenerator(), }).SetupWithManager(mgr); err != nil { setupLog.Error(err, "unable to create controller", "controller", "S3User") os.Exit(1) } + //+kubebuilder:scaffold:builder if err := 
mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/config/crd/bases/s3.onyxia.sh_buckets.yaml b/config/crd/bases/s3.onyxia.sh_buckets.yaml index 6b2cbcd..4d46e44 100644 --- a/config/crd/bases/s3.onyxia.sh_buckets.yaml +++ b/config/crd/bases/s3.onyxia.sh_buckets.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.17.1 name: buckets.s3.onyxia.sh spec: group: s3.onyxia.sh @@ -21,14 +20,19 @@ spec: description: Bucket is the Schema for the buckets API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -57,53 +61,58 @@ spec: required: - default type: object + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the bucket + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf required: - name - quota + - s3InstanceRef type: object status: description: BucketStatus defines the observed state of Bucket properties: conditions: - description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + description: |- + Status management using Conditions. + See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -118,10 +127,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/s3.onyxia.sh_paths.yaml b/config/crd/bases/s3.onyxia.sh_paths.yaml index bc55aa3..67fd1c5 100644 --- a/config/crd/bases/s3.onyxia.sh_paths.yaml +++ b/config/crd/bases/s3.onyxia.sh_paths.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.17.1 name: paths.s3.onyxia.sh spec: group: s3.onyxia.sh @@ -21,14 +20,19 @@ spec: description: Path is the Schema for the paths API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -43,6 +47,16 @@ spec: items: type: string type: array + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the Paths + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf required: - bucketName type: object @@ -50,45 +64,39 @@ spec: description: PathStatus defines the observed state of Path properties: conditions: - description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + description: |- + Status management using Conditions. + See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -103,10 +111,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/s3.onyxia.sh_policies.yaml b/config/crd/bases/s3.onyxia.sh_policies.yaml index aa78618..ce5f62e 100644 --- a/config/crd/bases/s3.onyxia.sh_policies.yaml +++ b/config/crd/bases/s3.onyxia.sh_policies.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.17.1 name: policies.s3.onyxia.sh spec: group: s3.onyxia.sh @@ -21,14 +20,19 @@ spec: description: Policy is the Schema for the policies API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -41,6 +45,16 @@ spec: policyContent: description: Content of the policy (IAM JSON format) type: string + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the Policy + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf required: - name - policyContent @@ -49,45 +63,39 @@ spec: description: PolicyStatus defines the observed state of Policy properties: conditions: - description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + description: |- + Status management using Conditions. + See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. 
// Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -102,10 +110,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/bases/s3.onyxia.sh_s3instances.yaml b/config/crd/bases/s3.onyxia.sh_s3instances.yaml new file mode 100644 index 0000000..8cf429d --- /dev/null +++ b/config/crd/bases/s3.onyxia.sh_s3instances.yaml @@ -0,0 +1,165 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.17.1 + name: s3instances.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: S3Instance + listKind: S3InstanceList + plural: s3instances + singular: s3instance + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: S3Instance is the Schema for the S3Instances API + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + description: S3InstanceSpec defines the desired state of S3Instance + properties: + allowedNamespaces: + description: AllowedNamespaces to use this S3InstanceUrl if empty + only the namespace of this instance url is allowed to use it + items: + type: string + type: array + bucketDeletionEnabled: + default: false + description: BucketDeletionEnabled Trigger bucket deletion on the + S3 backend upon CR deletion. Will fail if bucket is not empty. + type: boolean + caCertSecretRef: + description: Secret containing key ca.crt with the certificate associated + to the S3InstanceUrl + type: string + pathDeletionEnabled: + default: false + description: PathDeletionEnabled Trigger path deletion on the S3 backend + upon CR deletion. Limited to deleting the `.keep` files used by + the operator. + type: boolean + policyDeletionEnabled: + default: false + description: PolicyDeletionEnabled Trigger policy deletion on the + S3 backend upon CR deletion. + type: boolean + region: + description: region associated to the S3Instance + type: string + s3Provider: + default: minio + description: type of the S3Instance + enum: + - minio + - mockedS3Provider + type: string + x-kubernetes-validations: + - message: S3Provider is immutable + rule: self == oldSelf + s3UserDeletionEnabled: + default: false + description: S3UserDeletionEnabled Trigger S3 deletion on the S3 backend + upon CR deletion. + type: boolean + secretRef: + description: Ref to Secret associated to the S3Instance containing + accessKey and secretKey + type: string + url: + description: url of the S3Instance + type: string + required: + - s3Provider + - secretRef + - url + type: object + status: + description: S3InstanceStatus defines the observed state of S3Instance + properties: + conditions: + description: |- + Status management using Conditions. 
+ See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} diff --git a/config/crd/bases/s3.onyxia.sh_s3users.yaml b/config/crd/bases/s3.onyxia.sh_s3users.yaml index 2c46a98..5dffc6d 100644 --- a/config/crd/bases/s3.onyxia.sh_s3users.yaml +++ b/config/crd/bases/s3.onyxia.sh_s3users.yaml @@ -3,8 +3,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.11.1 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.17.1 name: s3users.s3.onyxia.sh spec: group: s3.onyxia.sh @@ -21,14 +20,19 @@ spec: description: S3User is the Schema for the S3Users API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + description: |- + APIVersion defines the versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. 
Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds type: string metadata: type: object @@ -43,6 +47,28 @@ spec: items: type: string type: array + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the user + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf + secretFieldNameAccessKey: + default: accessKey + description: |- + SecretFieldNameAccessKey associated to the S3User + Allow overridden the default key to store the accessKey value in the secret + type: string + secretFieldNameSecretKey: + default: secretKey + description: |- + SecretFieldNameSecretKey associated to the S3User + Allow overridden the default key to store the secretKey value in the secret + type: string secretName: description: SecretName associated to the S3User type: string @@ -53,45 +79,39 @@ spec: description: S3UserStatus defines the observed state of S3User properties: conditions: - description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + description: |- + Status management using Conditions. + See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties items: - description: "Condition contains details for one aspect of the current - state of this API Resource. --- This struct is intended for direct - use as an array at the field path .status.conditions. For example, - \n type FooStatus struct{ // Represents the observations of a - foo's current state. // Known .status.conditions.type are: \"Available\", - \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge - // +listType=map // +listMapKey=type Conditions []metav1.Condition - `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" - protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + description: Condition contains details for one aspect of the current + state of this API Resource. properties: lastTransitionTime: - description: lastTransitionTime is the last time the condition - transitioned from one status to another. This should be when - the underlying condition changed. If that is not known, then - using the time when the API field changed is acceptable. + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. format: date-time type: string message: - description: message is a human readable message indicating - details about the transition. This may be an empty string. + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. 
maxLength: 32768 type: string observedGeneration: - description: observedGeneration represents the .metadata.generation - that the condition was set based upon. For instance, if .metadata.generation - is currently 12, but the .status.conditions[x].observedGeneration - is 9, the condition is out of date with respect to the current - state of the instance. + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. format: int64 minimum: 0 type: integer reason: - description: reason contains a programmatic identifier indicating - the reason for the condition's last transition. Producers - of specific condition types may define expected values and - meanings for this field, and whether the values are considered - a guaranteed API. The value should be a CamelCase string. + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. This field may not be empty. maxLength: 1024 minLength: 1 @@ -106,10 +126,6 @@ spec: type: string type: description: type of condition in CamelCase or in foo.example.com/CamelCase. - --- Many .condition.type values are consistent across resources - like Available, but because arbitrary conditions can be useful - (see .node.status.conditions), the ability to deconflict is - important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) maxLength: 316 pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ type: string diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml index c77409c..96d724f 100644 --- a/config/crd/kustomization.yaml +++ b/config/crd/kustomization.yaml @@ -1,29 +1,29 @@ -# This kustomization.yaml is not intended to be run by itself, -# since it depends on service name and namespace that are out of this kustomize package. -# It should be run by config/default -resources: -- bases/s3.onyxia.sh_buckets.yaml -- bases/s3.onyxia.sh_policies.yaml -- bases/s3.onyxia.sh_paths.yaml -- bases/s3.onyxia.sh_s3users.yaml - -#+kubebuilder:scaffold:crdkustomizeresource - -patchesStrategicMerge: -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. -# patches here are for enabling the conversion webhook for each CRD -#- patches/webhook_in_buckets.yaml -#- patches/webhook_in_policies.yaml -#- patches/webhook_in_paths.yaml -#+kubebuilder:scaffold:crdkustomizewebhookpatch - -# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. -# patches here are for enabling the CA injection for each CRD -#- patches/cainjection_in_buckets.yaml -#- patches/cainjection_in_policies.yaml -#- patches/cainjection_in_paths.yaml -#+kubebuilder:scaffold:crdkustomizecainjectionpatch - -# the following config is for teaching kustomize how to do kustomization for CRDs. -configurations: -- kustomizeconfig.yaml +# This kustomization.yaml is not intended to be run by itself, +# since it depends on service name and namespace that are out of this kustomize package. 
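The CRDs above all gain an immutable s3InstanceRef field, and S3User additionally gains overridable secret key names. A hedged sketch of resources using these new fields follows; spec fields that predate this change (the user name, attached policies, the exact quota semantics) live outside this diff and are either omitted or guessed at, and all object names are illustrative.

```yaml
# Illustrative consumers of the new s3InstanceRef field; object names are examples.
apiVersion: s3.onyxia.sh/v1alpha1
kind: Bucket
metadata:
  name: example-bucket
  namespace: my-team
spec:
  name: example-bucket
  quota:
    default: 1000000000                # the quota schema and its unit are defined outside this hunk; value is a guess
  s3InstanceRef: s3-operator/default   # the default value; immutable once set
---
apiVersion: s3.onyxia.sh/v1alpha1
kind: S3User
metadata:
  name: example-user
  namespace: my-team
spec:
  # pre-existing S3UserSpec fields (user name, attached policies, ...) are omitted here
  s3InstanceRef: s3-operator/default
  secretName: example-user-credentials
  secretFieldNameAccessKey: AWS_ACCESS_KEY_ID      # overrides the default key name "accessKey"
  secretFieldNameSecretKey: AWS_SECRET_ACCESS_KEY  # overrides the default key name "secretKey"
```

Reconciliation results surface through the Conditions field using the constants added in api/v1alpha1/types.go; a successfully reconciled resource would carry something along these lines (values illustrative):

```yaml
status:
  conditions:
    - type: Reconciled                 # ConditionReconciled in api/v1alpha1/types.go
      status: "True"
      reason: Reconciled               # other reasons defined there: Reconciling, Unreachable, CreationFailure, DeletionFailure, DeletionBlocked
      message: resource reconciled on the S3 backend
      lastTransitionTime: "2024-01-01T00:00:00Z"
      observedGeneration: 1
```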
+# It should be run by config/default +resources: +- bases/s3.onyxia.sh_buckets.yaml +- bases/s3.onyxia.sh_policies.yaml +- bases/s3.onyxia.sh_paths.yaml +- bases/s3.onyxia.sh_s3users.yaml + +#+kubebuilder:scaffold:crdkustomizeresource + +patchesStrategicMerge: +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix. +# patches here are for enabling the conversion webhook for each CRD +#- patches/webhook_in_buckets.yaml +#- patches/webhook_in_policies.yaml +#- patches/webhook_in_paths.yaml +#+kubebuilder:scaffold:crdkustomizewebhookpatch + +# [CERTMANAGER] To enable cert-manager, uncomment all the sections with [CERTMANAGER] prefix. +# patches here are for enabling the CA injection for each CRD +#- patches/cainjection_in_buckets.yaml +#- patches/cainjection_in_policies.yaml +#- patches/cainjection_in_paths.yaml +#+kubebuilder:scaffold:crdkustomizecainjectionpatch + +# the following config is for teaching kustomize how to do kustomization for CRDs. +configurations: +- kustomizeconfig.yaml diff --git a/config/crd/kustomizeconfig.yaml b/config/crd/kustomizeconfig.yaml index ec5c150..52d3060 100644 --- a/config/crd/kustomizeconfig.yaml +++ b/config/crd/kustomizeconfig.yaml @@ -1,19 +1,19 @@ -# This file is for teaching kustomize how to substitute name and namespace reference in CRD -nameReference: -- kind: Service - version: v1 - fieldSpecs: - - kind: CustomResourceDefinition - version: v1 - group: apiextensions.k8s.io - path: spec/conversion/webhook/clientConfig/service/name - -namespace: -- kind: CustomResourceDefinition - version: v1 - group: apiextensions.k8s.io - path: spec/conversion/webhook/clientConfig/service/namespace - create: false - -varReference: -- path: metadata/annotations +# This file is for teaching kustomize how to substitute name and namespace reference in CRD +nameReference: +- kind: Service + version: v1 + fieldSpecs: + - kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/name + +namespace: +- kind: CustomResourceDefinition + version: v1 + group: apiextensions.k8s.io + path: spec/conversion/webhook/clientConfig/service/namespace + create: false + +varReference: +- path: metadata/annotations diff --git a/config/crd/patches/cainjection_in_buckets.yaml b/config/crd/patches/cainjection_in_buckets.yaml index 18870fa..0993c6b 100644 --- a/config/crd/patches/cainjection_in_buckets.yaml +++ b/config/crd/patches/cainjection_in_buckets.yaml @@ -1,7 +1,7 @@ -# The following patch adds a directive for certmanager to inject CA into the CRD -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: buckets.s3.onyxia.sh +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: buckets.s3.onyxia.sh diff --git a/config/crd/patches/cainjection_in_paths.yaml b/config/crd/patches/cainjection_in_paths.yaml index 6c5369c..38a1e73 100644 --- a/config/crd/patches/cainjection_in_paths.yaml +++ b/config/crd/patches/cainjection_in_paths.yaml @@ -1,7 +1,7 @@ -# The following patch adds a directive for certmanager to inject CA into the CRD -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from: 
$(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: paths.s3.onyxia.sh +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: paths.s3.onyxia.sh diff --git a/config/crd/patches/cainjection_in_policies.yaml b/config/crd/patches/cainjection_in_policies.yaml index b138a6a..23b4624 100644 --- a/config/crd/patches/cainjection_in_policies.yaml +++ b/config/crd/patches/cainjection_in_policies.yaml @@ -1,7 +1,7 @@ -# The following patch adds a directive for certmanager to inject CA into the CRD -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - annotations: - cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) - name: policies.s3.onyxia.sh +# The following patch adds a directive for certmanager to inject CA into the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: policies.s3.onyxia.sh diff --git a/config/crd/patches/webhook_in_buckets.yaml b/config/crd/patches/webhook_in_buckets.yaml index 4cdfb85..4e06819 100644 --- a/config/crd/patches/webhook_in_buckets.yaml +++ b/config/crd/patches/webhook_in_buckets.yaml @@ -1,16 +1,16 @@ -# The following patch enables a conversion webhook for the CRD -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: buckets.s3.onyxia.sh -spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - service: - namespace: system - name: webhook-service - path: /convert - conversionReviewVersions: - - v1 +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: buckets.s3.onyxia.sh +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_paths.yaml b/config/crd/patches/webhook_in_paths.yaml index e9d6275..a784017 100644 --- a/config/crd/patches/webhook_in_paths.yaml +++ b/config/crd/patches/webhook_in_paths.yaml @@ -1,16 +1,16 @@ -# The following patch enables a conversion webhook for the CRD -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: paths.s3.onyxia.sh -spec: - conversion: - strategy: Webhook - webhook: - clientConfig: - service: - namespace: system - name: webhook-service - path: /convert - conversionReviewVersions: - - v1 +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: paths.s3.onyxia.sh +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/crd/patches/webhook_in_policies.yaml b/config/crd/patches/webhook_in_policies.yaml index b7b3ef7..9218493 100644 --- a/config/crd/patches/webhook_in_policies.yaml +++ b/config/crd/patches/webhook_in_policies.yaml @@ -1,16 +1,16 @@ -# The following patch enables a conversion webhook for the CRD -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: policies.s3.onyxia.sh -spec: - conversion: - strategy: Webhook - webhook: - 
clientConfig: - service: - namespace: system - name: webhook-service - path: /convert - conversionReviewVersions: - - v1 +# The following patch enables a conversion webhook for the CRD +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: policies.s3.onyxia.sh +spec: + conversion: + strategy: Webhook + webhook: + clientConfig: + service: + namespace: system + name: webhook-service + path: /convert + conversionReviewVersions: + - v1 diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index d2a7cce..90d71f8 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -1,72 +1,72 @@ -# Adds namespace to all resources. -namespace: s3-operator-system - -# Value of this field is prepended to the -# names of all resources, e.g. a deployment named -# "wordpress" becomes "alices-wordpress". -# Note that it should also match with the prefix (text before '-') of the namespace -# field above. -namePrefix: s3-operator- - -# Labels to add to all resources and selectors. -#commonLabels: -# someName: someValue - -bases: -- ../crd -- ../rbac -- ../manager -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in -# crd/kustomization.yaml -#- ../webhook -# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. -#- ../certmanager -# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. -#- ../prometheus - -patchesStrategicMerge: -# Protect the /metrics endpoint by putting it behind auth. -# If you want your controller-manager to expose the /metrics -# endpoint w/o any authn/z, please comment the following line. -- manager_auth_proxy_patch.yaml - - - -# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in -# crd/kustomization.yaml -#- manager_webhook_patch.yaml - -# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. -# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. -# 'CERTMANAGER' needs to be enabled to use ca injection -#- webhookcainjection_patch.yaml - -# the following config is for teaching kustomize how to do var substitution -vars: -# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. -#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR -# objref: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert # this name should match the one in certificate.yaml -# fieldref: -# fieldpath: metadata.namespace -#- name: CERTIFICATE_NAME -# objref: -# kind: Certificate -# group: cert-manager.io -# version: v1 -# name: serving-cert # this name should match the one in certificate.yaml -#- name: SERVICE_NAMESPACE # namespace of the service -# objref: -# kind: Service -# version: v1 -# name: webhook-service -# fieldref: -# fieldpath: metadata.namespace -#- name: SERVICE_NAME -# objref: -# kind: Service -# version: v1 -# name: webhook-service +# Adds namespace to all resources. +namespace: s3-operator-system + +# Value of this field is prepended to the +# names of all resources, e.g. a deployment named +# "wordpress" becomes "alices-wordpress". +# Note that it should also match with the prefix (text before '-') of the namespace +# field above. +namePrefix: s3-operator- + +# Labels to add to all resources and selectors. 
+#commonLabels: +# someName: someValue + +bases: +- ../crd +- ../rbac +- ../manager +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- ../webhook +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. 'WEBHOOK' components are required. +#- ../certmanager +# [PROMETHEUS] To enable prometheus monitor, uncomment all sections with 'PROMETHEUS'. +#- ../prometheus + +patchesStrategicMerge: +# Protect the /metrics endpoint by putting it behind auth. +# If you want your controller-manager to expose the /metrics +# endpoint w/o any authn/z, please comment the following line. +- manager_auth_proxy_patch.yaml + + + +# [WEBHOOK] To enable webhook, uncomment all the sections with [WEBHOOK] prefix including the one in +# crd/kustomization.yaml +#- manager_webhook_patch.yaml + +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER'. +# Uncomment 'CERTMANAGER' sections in crd/kustomization.yaml to enable the CA injection in the admission webhooks. +# 'CERTMANAGER' needs to be enabled to use ca injection +#- webhookcainjection_patch.yaml + +# the following config is for teaching kustomize how to do var substitution +vars: +# [CERTMANAGER] To enable cert-manager, uncomment all sections with 'CERTMANAGER' prefix. +#- name: CERTIFICATE_NAMESPACE # namespace of the certificate CR +# objref: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +# fieldref: +# fieldpath: metadata.namespace +#- name: CERTIFICATE_NAME +# objref: +# kind: Certificate +# group: cert-manager.io +# version: v1 +# name: serving-cert # this name should match the one in certificate.yaml +#- name: SERVICE_NAMESPACE # namespace of the service +# objref: +# kind: Service +# version: v1 +# name: webhook-service +# fieldref: +# fieldpath: metadata.namespace +#- name: SERVICE_NAME +# objref: +# kind: Service +# version: v1 +# name: webhook-service diff --git a/config/default/manager_auth_proxy_patch.yaml b/config/default/manager_auth_proxy_patch.yaml index fa8cf79..74b5323 100644 --- a/config/default/manager_auth_proxy_patch.yaml +++ b/config/default/manager_auth_proxy_patch.yaml @@ -1,55 +1,55 @@ -# This patch inject a sidecar container which is a HTTP proxy for the -# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. 
-apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - affinity: - nodeAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - nodeSelectorTerms: - - matchExpressions: - - key: kubernetes.io/arch - operator: In - values: - - amd64 - - arm64 - - ppc64le - - s390x - - key: kubernetes.io/os - operator: In - values: - - linux - containers: - - name: kube-rbac-proxy - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - "ALL" - image: gcr.io/kubebuilder/kube-rbac-proxy:v0.14.1 - args: - - "--secure-listen-address=0.0.0.0:8443" - - "--upstream=http://127.0.0.1:8080/" - - "--logtostderr=true" - - "--v=0" - ports: - - containerPort: 8443 - protocol: TCP - name: https - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 5m - memory: 64Mi - - name: manager - args: - - "--health-probe-bind-address=:8081" - - "--metrics-bind-address=127.0.0.1:8080" - - "--leader-elect" +# This patch inject a sidecar container which is a HTTP proxy for the +# controller manager, it performs RBAC authorization against the Kubernetes API using SubjectAccessReviews. +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: kubernetes.io/arch + operator: In + values: + - amd64 + - arm64 + - ppc64le + - s390x + - key: kubernetes.io/os + operator: In + values: + - linux + containers: + - name: kube-rbac-proxy + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + image: gcr.io/kubebuilder/kube-rbac-proxy:v0.14.1 + args: + - "--secure-listen-address=0.0.0.0:8443" + - "--upstream=http://127.0.0.1:8080/" + - "--logtostderr=true" + - "--v=0" + ports: + - containerPort: 8443 + protocol: TCP + name: https + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 5m + memory: 64Mi + - name: manager + args: + - "--health-probe-bind-address=:8081" + - "--metrics-bind-address=127.0.0.1:8080" + - "--leader-elect" diff --git a/config/default/manager_config_patch.yaml b/config/default/manager_config_patch.yaml index f6f5891..655b7cc 100644 --- a/config/default/manager_config_patch.yaml +++ b/config/default/manager_config_patch.yaml @@ -1,10 +1,10 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: controller-manager - namespace: system -spec: - template: - spec: - containers: - - name: manager +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system +spec: + template: + spec: + containers: + - name: manager diff --git a/config/manager/kustomization.yaml b/config/manager/kustomization.yaml index 5c5f0b8..63801df 100644 --- a/config/manager/kustomization.yaml +++ b/config/manager/kustomization.yaml @@ -1,2 +1,2 @@ -resources: -- manager.yaml +resources: +- manager.yaml diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 4c237c1..f0de0e3 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -1,102 +1,102 @@ -apiVersion: v1 -kind: Namespace -metadata: - labels: - control-plane: controller-manager - app.kubernetes.io/name: namespace - app.kubernetes.io/instance: system - app.kubernetes.io/component: manager - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: system ---- -apiVersion: apps/v1 -kind: Deployment 
-metadata: - name: controller-manager - namespace: system - labels: - control-plane: controller-manager - app.kubernetes.io/name: deployment - app.kubernetes.io/instance: controller-manager - app.kubernetes.io/component: manager - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize -spec: - selector: - matchLabels: - control-plane: controller-manager - replicas: 1 - template: - metadata: - annotations: - kubectl.kubernetes.io/default-container: manager - labels: - control-plane: controller-manager - spec: - # TODO(user): Uncomment the following code to configure the nodeAffinity expression - # according to the platforms which are supported by your solution. - # It is considered best practice to support multiple architectures. You can - # build your manager image using the makefile target docker-buildx. - # affinity: - # nodeAffinity: - # requiredDuringSchedulingIgnoredDuringExecution: - # nodeSelectorTerms: - # - matchExpressions: - # - key: kubernetes.io/arch - # operator: In - # values: - # - amd64 - # - arm64 - # - ppc64le - # - s390x - # - key: kubernetes.io/os - # operator: In - # values: - # - linux - securityContext: - runAsNonRoot: true - # TODO(user): For common cases that do not require escalating privileges - # it is recommended to ensure that all your Pods/Containers are restrictive. - # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted - # Please uncomment the following code if your project does NOT have to work on old Kubernetes - # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). - # seccompProfile: - # type: RuntimeDefault - containers: - - command: - - /manager - args: - - --leader-elect - image: controller:latest - name: manager - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - "ALL" - livenessProbe: - httpGet: - path: /healthz - port: 8081 - initialDelaySeconds: 15 - periodSeconds: 20 - readinessProbe: - httpGet: - path: /readyz - port: 8081 - initialDelaySeconds: 5 - periodSeconds: 10 - # TODO(user): Configure the resources accordingly based on the project requirements. 
- # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ - resources: - limits: - cpu: 500m - memory: 128Mi - requests: - cpu: 10m - memory: 64Mi - serviceAccountName: controller-manager - terminationGracePeriodSeconds: 10 +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: namespace + app.kubernetes.io/instance: system + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: system +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: controller-manager + namespace: system + labels: + control-plane: controller-manager + app.kubernetes.io/name: deployment + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize +spec: + selector: + matchLabels: + control-plane: controller-manager + replicas: 1 + template: + metadata: + annotations: + kubectl.kubernetes.io/default-container: manager + labels: + control-plane: controller-manager + spec: + # TODO(user): Uncomment the following code to configure the nodeAffinity expression + # according to the platforms which are supported by your solution. + # It is considered best practice to support multiple architectures. You can + # build your manager image using the makefile target docker-buildx. + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/arch + # operator: In + # values: + # - amd64 + # - arm64 + # - ppc64le + # - s390x + # - key: kubernetes.io/os + # operator: In + # values: + # - linux + securityContext: + runAsNonRoot: true + # TODO(user): For common cases that do not require escalating privileges + # it is recommended to ensure that all your Pods/Containers are restrictive. + # More info: https://kubernetes.io/docs/concepts/security/pod-security-standards/#restricted + # Please uncomment the following code if your project does NOT have to work on old Kubernetes + # versions < 1.19 or on vendors versions which do NOT support this field by default (i.e. Openshift < 4.11 ). + # seccompProfile: + # type: RuntimeDefault + containers: + - command: + - /manager + args: + - --leader-elect + image: controller:latest + name: manager + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - "ALL" + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + # TODO(user): Configure the resources accordingly based on the project requirements. + # More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ + resources: + limits: + cpu: 500m + memory: 128Mi + requests: + cpu: 10m + memory: 64Mi + serviceAccountName: controller-manager + terminationGracePeriodSeconds: 10 diff --git a/config/manifests/kustomization.yaml b/config/manifests/kustomization.yaml index 5d4b73c..3fce432 100644 --- a/config/manifests/kustomization.yaml +++ b/config/manifests/kustomization.yaml @@ -1,27 +1,27 @@ -# These resources constitute the fully configured set of manifests -# used to generate the 'manifests/' directory in a bundle. 
-resources: -- bases/s3-operator.clusterserviceversion.yaml -- ../default -- ../samples -- ../scorecard - -# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. -# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. -# These patches remove the unnecessary "cert" volume and its manager container volumeMount. -#patchesJson6902: -#- target: -# group: apps -# version: v1 -# kind: Deployment -# name: controller-manager -# namespace: system -# patch: |- -# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. -# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. -# - op: remove -# path: /spec/template/spec/containers/1/volumeMounts/0 -# # Remove the "cert" volume, since OLM will create and mount a set of certs. -# # Update the indices in this path if adding or removing volumes in the manager's Deployment. -# - op: remove -# path: /spec/template/spec/volumes/0 +# These resources constitute the fully configured set of manifests +# used to generate the 'manifests/' directory in a bundle. +resources: +- bases/s3-operator.clusterserviceversion.yaml +- ../default +- ../samples +- ../scorecard + +# [WEBHOOK] To enable webhooks, uncomment all the sections with [WEBHOOK] prefix. +# Do NOT uncomment sections with prefix [CERTMANAGER], as OLM does not support cert-manager. +# These patches remove the unnecessary "cert" volume and its manager container volumeMount. +#patchesJson6902: +#- target: +# group: apps +# version: v1 +# kind: Deployment +# name: controller-manager +# namespace: system +# patch: |- +# # Remove the manager container's "cert" volumeMount, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing containers/volumeMounts in the manager's Deployment. +# - op: remove +# path: /spec/template/spec/containers/1/volumeMounts/0 +# # Remove the "cert" volume, since OLM will create and mount a set of certs. +# # Update the indices in this path if adding or removing volumes in the manager's Deployment. 
+# - op: remove +# path: /spec/template/spec/volumes/0 diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml index ed13716..622944f 100644 --- a/config/prometheus/kustomization.yaml +++ b/config/prometheus/kustomization.yaml @@ -1,2 +1,2 @@ -resources: -- monitor.yaml +resources: +- monitor.yaml diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml index 03e7372..b3d897c 100644 --- a/config/prometheus/monitor.yaml +++ b/config/prometheus/monitor.yaml @@ -1,26 +1,26 @@ - -# Prometheus Monitor Service (Metrics) -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - labels: - control-plane: controller-manager - app.kubernetes.io/name: servicemonitor - app.kubernetes.io/instance: controller-manager-metrics-monitor - app.kubernetes.io/component: metrics - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: controller-manager-metrics-monitor - namespace: system -spec: - endpoints: - - path: /metrics - port: https - scheme: https - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token - tlsConfig: - insecureSkipVerify: true - selector: - matchLabels: - control-plane: controller-manager + +# Prometheus Monitor Service (Metrics) +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: servicemonitor + app.kubernetes.io/instance: controller-manager-metrics-monitor + app.kubernetes.io/component: metrics + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-monitor + namespace: system +spec: + endpoints: + - path: /metrics + port: https + scheme: https + bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + tlsConfig: + insecureSkipVerify: true + selector: + matchLabels: + control-plane: controller-manager diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml index 6f8f5e1..3862f39 100644 --- a/config/rbac/auth_proxy_client_clusterrole.yaml +++ b/config/rbac/auth_proxy_client_clusterrole.yaml @@ -1,16 +1,16 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: metrics-reader - app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: metrics-reader -rules: -- nonResourceURLs: - - "/metrics" - verbs: - - get +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: metrics-reader + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: metrics-reader +rules: +- nonResourceURLs: + - "/metrics" + verbs: + - get diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml index 5276e30..4653cbf 100644 --- a/config/rbac/auth_proxy_role.yaml +++ b/config/rbac/auth_proxy_role.yaml @@ -1,24 +1,24 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: proxy-role - app.kubernetes.io/component: kube-rbac-proxy - 
app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: proxy-role -rules: -- apiGroups: - - authentication.k8s.io - resources: - - tokenreviews - verbs: - - create -- apiGroups: - - authorization.k8s.io - resources: - - subjectaccessreviews - verbs: - - create +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: proxy-role + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: proxy-role +rules: +- apiGroups: + - authentication.k8s.io + resources: + - tokenreviews + verbs: + - create +- apiGroups: + - authorization.k8s.io + resources: + - subjectaccessreviews + verbs: + - create diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml index 28f0acd..41eefd0 100644 --- a/config/rbac/auth_proxy_role_binding.yaml +++ b/config/rbac/auth_proxy_role_binding.yaml @@ -1,19 +1,19 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/name: clusterrolebinding - app.kubernetes.io/instance: proxy-rolebinding - app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: proxy-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: proxy-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: proxy-rolebinding + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: proxy-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: proxy-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml index 5c4de26..4889565 100644 --- a/config/rbac/auth_proxy_service.yaml +++ b/config/rbac/auth_proxy_service.yaml @@ -1,21 +1,21 @@ -apiVersion: v1 -kind: Service -metadata: - labels: - control-plane: controller-manager - app.kubernetes.io/name: service - app.kubernetes.io/instance: controller-manager-metrics-service - app.kubernetes.io/component: kube-rbac-proxy - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: controller-manager-metrics-service - namespace: system -spec: - ports: - - name: https - port: 8443 - protocol: TCP - targetPort: https - selector: - control-plane: controller-manager +apiVersion: v1 +kind: Service +metadata: + labels: + control-plane: controller-manager + app.kubernetes.io/name: service + app.kubernetes.io/instance: controller-manager-metrics-service + app.kubernetes.io/component: kube-rbac-proxy + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager-metrics-service + namespace: system +spec: + ports: + - name: https + port: 8443 + protocol: TCP + targetPort: https + selector: + control-plane: controller-manager 
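The ServiceMonitor and kube-rbac-proxy pieces above only work end to end if the scraping Pod's ServiceAccount is allowed to GET the /metrics non-resource URL, which is exactly what the metrics-reader ClusterRole grants. A minimal sketch of such a binding, assuming a Prometheus ServiceAccount named prometheus-k8s in a monitoring namespace (both names are illustrative, and the ClusterRole name may carry a kustomize namePrefix in a real deployment):

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus-metrics-reader   # illustrative name
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metrics-reader              # as defined in auth_proxy_client_clusterrole.yaml; may be prefixed at deploy time
subjects:
- kind: ServiceAccount
  name: prometheus-k8s              # assumed: the ServiceAccount Prometheus scrapes with
  namespace: monitoring             # assumed

Without a binding along these lines, kube-rbac-proxy would be expected to reject the scrape with a 401/403 even though the ServiceMonitor itself applies cleanly.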
diff --git a/config/rbac/bucket_editor_role.yaml b/config/rbac/bucket_editor_role.yaml index ae2f161..912af4b 100644 --- a/config/rbac/bucket_editor_role.yaml +++ b/config/rbac/bucket_editor_role.yaml @@ -1,31 +1,31 @@ -# permissions for end users to edit buckets. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: bucket-editor-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: bucket-editor-role -rules: -- apiGroups: - - s3.onyxia.sh - resources: - - buckets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - s3.onyxia.sh - resources: - - buckets/status - verbs: - - get +# permissions for end users to edit buckets. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: bucket-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: bucket-editor-role +rules: +- apiGroups: + - s3.onyxia.sh + resources: + - buckets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - buckets/status + verbs: + - get diff --git a/config/rbac/bucket_viewer_role.yaml b/config/rbac/bucket_viewer_role.yaml index 0cb56d4..cdf8cd8 100644 --- a/config/rbac/bucket_viewer_role.yaml +++ b/config/rbac/bucket_viewer_role.yaml @@ -1,27 +1,27 @@ -# permissions for end users to view buckets. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: bucket-viewer-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: bucket-viewer-role -rules: -- apiGroups: - - s3.onyxia.sh - resources: - - buckets - verbs: - - get - - list - - watch -- apiGroups: - - s3.onyxia.sh - resources: - - buckets/status - verbs: - - get +# permissions for end users to view buckets. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: bucket-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: bucket-viewer-role +rules: +- apiGroups: + - s3.onyxia.sh + resources: + - buckets + verbs: + - get + - list + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - buckets/status + verbs: + - get diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml index 731832a..43a6eeb 100644 --- a/config/rbac/kustomization.yaml +++ b/config/rbac/kustomization.yaml @@ -1,18 +1,18 @@ -resources: -# All RBAC will be applied under this service account in -# the deployment namespace. You may comment out this resource -# if your manager will use a service account that exists at -# runtime. Be sure to update RoleBinding and ClusterRoleBinding -# subjects if changing service account names. 
-- service_account.yaml -- role.yaml -- role_binding.yaml -- leader_election_role.yaml -- leader_election_role_binding.yaml -# Comment the following 4 lines if you want to disable -# the auth proxy (https://github.com/brancz/kube-rbac-proxy) -# which protects your /metrics endpoint. -- auth_proxy_service.yaml -- auth_proxy_role.yaml -- auth_proxy_role_binding.yaml -- auth_proxy_client_clusterrole.yaml +resources: +# All RBAC will be applied under this service account in +# the deployment namespace. You may comment out this resource +# if your manager will use a service account that exists at +# runtime. Be sure to update RoleBinding and ClusterRoleBinding +# subjects if changing service account names. +- service_account.yaml +- role.yaml +- role_binding.yaml +- leader_election_role.yaml +- leader_election_role_binding.yaml +# Comment the following 4 lines if you want to disable +# the auth proxy (https://github.com/brancz/kube-rbac-proxy) +# which protects your /metrics endpoint. +- auth_proxy_service.yaml +- auth_proxy_role.yaml +- auth_proxy_role_binding.yaml +- auth_proxy_client_clusterrole.yaml diff --git a/config/rbac/leader_election_role.yaml b/config/rbac/leader_election_role.yaml index f03ce8f..23c754c 100644 --- a/config/rbac/leader_election_role.yaml +++ b/config/rbac/leader_election_role.yaml @@ -1,44 +1,44 @@ -# permissions to do leader election. -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - labels: - app.kubernetes.io/name: role - app.kubernetes.io/instance: leader-election-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: leader-election-role -rules: -- apiGroups: - - "" - resources: - - configmaps - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - coordination.k8s.io - resources: - - leases - verbs: - - get - - list - - watch - - create - - update - - patch - - delete -- apiGroups: - - "" - resources: - - events - verbs: - - create - - patch +# permissions to do leader election. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + labels: + app.kubernetes.io/name: role + app.kubernetes.io/instance: leader-election-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-role +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - coordination.k8s.io + resources: + - leases + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch diff --git a/config/rbac/leader_election_role_binding.yaml b/config/rbac/leader_election_role_binding.yaml index d08ed73..eed2fd2 100644 --- a/config/rbac/leader_election_role_binding.yaml +++ b/config/rbac/leader_election_role_binding.yaml @@ -1,19 +1,19 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - labels: - app.kubernetes.io/name: rolebinding - app.kubernetes.io/instance: leader-election-rolebinding - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: leader-election-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: leader-election-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + labels: + app.kubernetes.io/name: rolebinding + app.kubernetes.io/instance: leader-election-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: leader-election-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: leader-election-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/path_editor_role.yaml b/config/rbac/path_editor_role.yaml index ff3e7ac..c40c00f 100644 --- a/config/rbac/path_editor_role.yaml +++ b/config/rbac/path_editor_role.yaml @@ -1,31 +1,31 @@ -# permissions for end users to edit paths. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: path-editor-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: path-editor-role -rules: -- apiGroups: - - s3.onyxia.sh - resources: - - paths - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - s3.onyxia.sh - resources: - - paths/status - verbs: - - get +# permissions for end users to edit paths. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: path-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: path-editor-role +rules: +- apiGroups: + - s3.onyxia.sh + resources: + - paths + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - paths/status + verbs: + - get diff --git a/config/rbac/path_viewer_role.yaml b/config/rbac/path_viewer_role.yaml index 6afede8..1419799 100644 --- a/config/rbac/path_viewer_role.yaml +++ b/config/rbac/path_viewer_role.yaml @@ -1,27 +1,27 @@ -# permissions for end users to view paths. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: path-viewer-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: path-viewer-role -rules: -- apiGroups: - - s3.onyxia.sh - resources: - - paths - verbs: - - get - - list - - watch -- apiGroups: - - s3.onyxia.sh - resources: - - paths/status - verbs: - - get +# permissions for end users to view paths. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: path-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: path-viewer-role +rules: +- apiGroups: + - s3.onyxia.sh + resources: + - paths + verbs: + - get + - list + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - paths/status + verbs: + - get diff --git a/config/rbac/policy_editor_role.yaml b/config/rbac/policy_editor_role.yaml index e3ecdae..7c829d8 100644 --- a/config/rbac/policy_editor_role.yaml +++ b/config/rbac/policy_editor_role.yaml @@ -1,31 +1,31 @@ -# permissions for end users to edit policies. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: policy-editor-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: policy-editor-role -rules: -- apiGroups: - - s3.onyxia.sh - resources: - - policies - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - s3.onyxia.sh - resources: - - policies/status - verbs: - - get +# permissions for end users to edit policies. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: policy-editor-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: policy-editor-role +rules: +- apiGroups: + - s3.onyxia.sh + resources: + - policies + verbs: + - create + - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - policies/status + verbs: + - get diff --git a/config/rbac/policy_viewer_role.yaml b/config/rbac/policy_viewer_role.yaml index ca576a6..c9c408f 100644 --- a/config/rbac/policy_viewer_role.yaml +++ b/config/rbac/policy_viewer_role.yaml @@ -1,27 +1,27 @@ -# permissions for end users to view policies. -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - labels: - app.kubernetes.io/name: clusterrole - app.kubernetes.io/instance: policy-viewer-role - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: policy-viewer-role -rules: -- apiGroups: - - s3.onyxia.sh - resources: - - policies - verbs: - - get - - list - - watch -- apiGroups: - - s3.onyxia.sh - resources: - - policies/status - verbs: - - get +# permissions for end users to view policies. +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + labels: + app.kubernetes.io/name: clusterrole + app.kubernetes.io/instance: policy-viewer-role + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: policy-viewer-role +rules: +- apiGroups: + - s3.onyxia.sh + resources: + - policies + verbs: + - get + - list + - watch +- apiGroups: + - s3.onyxia.sh + resources: + - policies/status + verbs: + - get diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml index 5190141..e7383eb 100644 --- a/config/rbac/role.yaml +++ b/config/rbac/role.yaml @@ -2,31 +2,29 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: - creationTimestamp: null name: manager-role rules: - apiGroups: - - s3.onyxia.sh + - "" resources: - - S3User + - secrets verbs: - create - delete - get - list - - patch - update - watch - apiGroups: - - s3.onyxia.sh + - "" resources: - - S3User/finalizers + - secrets/finalizers verbs: - update - apiGroups: - - s3.onyxia.sh + - "" resources: - - S3User/status + - secrets/status verbs: - get - patch @@ -35,58 +33,10 @@ rules: - s3.onyxia.sh resources: - buckets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - s3.onyxia.sh - resources: - - buckets/finalizers - verbs: - - update -- apiGroups: - - s3.onyxia.sh - resources: - - buckets/status - verbs: - - get - - patch - - update -- apiGroups: - - s3.onyxia.sh - resources: - paths - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - s3.onyxia.sh - resources: - - paths/finalizers - verbs: - - update -- apiGroups: - - s3.onyxia.sh - resources: - - paths/status - verbs: - - get - - patch - - update -- apiGroups: - - s3.onyxia.sh - resources: - policies + - s3instances + - s3users verbs: - create - delete @@ -98,13 +48,21 @@ rules: - apiGroups: - s3.onyxia.sh resources: + - buckets/finalizers + - paths/finalizers - policies/finalizers + - s3instances/finalizers + - s3users/finalizers 
verbs: - update - apiGroups: - s3.onyxia.sh resources: + - buckets/status + - paths/status - policies/status + - s3instances/status + - s3users/status verbs: - get - patch diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml index aee04f4..ab6bd0e 100644 --- a/config/rbac/role_binding.yaml +++ b/config/rbac/role_binding.yaml @@ -1,19 +1,19 @@ -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - labels: - app.kubernetes.io/name: clusterrolebinding - app.kubernetes.io/instance: manager-rolebinding - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: manager-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: manager-role -subjects: -- kind: ServiceAccount - name: controller-manager - namespace: system +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + labels: + app.kubernetes.io/name: clusterrolebinding + app.kubernetes.io/instance: manager-rolebinding + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: manager-rolebinding +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: manager-role +subjects: +- kind: ServiceAccount + name: controller-manager + namespace: system diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml index 9cf3648..105c0fd 100644 --- a/config/rbac/service_account.yaml +++ b/config/rbac/service_account.yaml @@ -1,12 +1,12 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - app.kubernetes.io/name: serviceaccount - app.kubernetes.io/instance: controller-manager - app.kubernetes.io/component: rbac - app.kubernetes.io/created-by: s3-operator - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - name: controller-manager - namespace: system +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + app.kubernetes.io/name: serviceaccount + app.kubernetes.io/instance: controller-manager + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + app.kubernetes.io/managed-by: kustomize + name: controller-manager + namespace: system diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml index e886e4e..6f46196 100644 --- a/config/samples/kustomization.yaml +++ b/config/samples/kustomization.yaml @@ -1,6 +1,6 @@ -## Append samples you want in your CSV to this file as resources ## -resources: -- s3.onyxia.sh_v1alpha1_bucket.yaml -- s3.onyxia.sh_v1alpha1_policy.yaml -- s3.onyxia.sh_v1alpha1_path.yaml -#+kubebuilder:scaffold:manifestskustomizesamples +## Append samples you want in your CSV to this file as resources ## +resources: +- s3.onyxia.sh_v1alpha1_bucket.yaml +- s3.onyxia.sh_v1alpha1_policy.yaml +- s3.onyxia.sh_v1alpha1_path.yaml +#+kubebuilder:scaffold:manifestskustomizesamples diff --git a/config/samples/s3.onyxia.sh_v1alpha1_bucket.yaml b/config/samples/s3.onyxia.sh_v1alpha1_bucket.yaml index 4d55426..28dfc45 100644 --- a/config/samples/s3.onyxia.sh_v1alpha1_bucket.yaml +++ b/config/samples/s3.onyxia.sh_v1alpha1_bucket.yaml @@ -1,25 +1,19 @@ -apiVersion: s3.onyxia.sh/v1alpha1 -kind: Bucket -metadata: - labels: - app.kubernetes.io/name: bucket - app.kubernetes.io/instance: bucket-sample - app.kubernetes.io/part-of: s3-operator - 
app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: s3-operator - name: bucket-sample -spec: - # Bucket name (on S3 server, as opposed to the name of the CR) - name: dummy-bucket - - # Paths to create on the bucket - paths: - - opendata - - # Quota values are in bytes - # TODO : ideally, it could be better to be able to specify the size unit - or at least use one less verbose - # than "bytes", but the first S3 provider implemented on this operator does not allow this. Or rather, the Minio - # CLI (mc) does, but the Go client sadly does not, which leaves us with the default unit of "bytes". - quota: - default: 10000000 # default value used for quota initial creation - # override: 20000000 # value overridable by a cluster admin +apiVersion: s3.onyxia.sh/v1alpha1 +kind: Bucket +metadata: + name: bucket-sample +spec: + # Bucket name (on S3 server, as opposed to the name of the CR) + name: dummy-bucket + + # Paths to create on the bucket + paths: + - opendata + + # Quota values are in bytes + # TODO : ideally, it could be better to be able to specify the size unit - or at least use one less verbose + # than "bytes", but the first S3 provider implemented on this operator does not allow this. Or rather, the Minio + # CLI (mc) does, but the Go client sadly does not, which leaves us with the default unit of "bytes". + quota: + default: 10000000 # default value used for quota initial creation + # override: 20000000 # value overridable by a cluster admin diff --git a/config/samples/s3.onyxia.sh_v1alpha1_path.yaml b/config/samples/s3.onyxia.sh_v1alpha1_path.yaml index 86cf9af..1466d9f 100644 --- a/config/samples/s3.onyxia.sh_v1alpha1_path.yaml +++ b/config/samples/s3.onyxia.sh_v1alpha1_path.yaml @@ -1,18 +1,12 @@ -apiVersion: s3.onyxia.sh/v1alpha1 -kind: Path -metadata: - labels: - app.kubernetes.io/name: path - app.kubernetes.io/instance: path-sample - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: s3-operator - name: path-sample -spec: - # Bucket name (on S3 server, as opposed to the name of the CR) - bucketName: dummy-bucket - - # Paths to create on the bucket - paths: - - path/to/create # a .keep file will be created at s3://dummy-bucket/path/to/create/.keep - +apiVersion: s3.onyxia.sh/v1alpha1 +kind: Path +metadata: + name: path-sample +spec: + # Bucket name (on S3 server, as opposed to the name of the CR) + bucketName: dummy-bucket + + # Paths to create on the bucket + paths: + - path/to/create # a .keep file will be created at s3://dummy-bucket/path/to/create/.keep + diff --git a/config/samples/s3.onyxia.sh_v1alpha1_policy.yaml b/config/samples/s3.onyxia.sh_v1alpha1_policy.yaml index bff4f74..99214f0 100644 --- a/config/samples/s3.onyxia.sh_v1alpha1_policy.yaml +++ b/config/samples/s3.onyxia.sh_v1alpha1_policy.yaml @@ -1,32 +1,26 @@ -apiVersion: s3.onyxia.sh/v1alpha1 -kind: Policy -metadata: - labels: - app.kubernetes.io/name: policy - app.kubernetes.io/instance: policy-sample - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: s3-operator - name: policy-sample -spec: - # Policy name (on S3 server, as opposed to the name of the CR) - name: dummy-policy - - # Content of the policy, as multiline string - policyContent: >- - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": [ - "s3:*" - ], - "Resource": [ - "arn:aws:s3:::dummy-bucket", - "arn:aws:s3:::dummy-bucket/*" - ] - } - ] - } - +apiVersion: s3.onyxia.sh/v1alpha1 
+kind: Policy +metadata: + name: policy-sample +spec: + # Policy name (on S3 server, as opposed to the name of the CR) + name: dummy-policy + + # Content of the policy, as multiline string + policyContent: >- + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": [ + "s3:*" + ], + "Resource": [ + "arn:aws:s3:::dummy-bucket", + "arn:aws:s3:::dummy-bucket/*" + ] + } + ] + } + diff --git a/config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml b/config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml new file mode 100644 index 0000000..f79f1de --- /dev/null +++ b/config/samples/s3.onyxia.sh_v1alpha1_s3instance.yaml @@ -0,0 +1,31 @@ +apiVersion: s3.onyxia.sh/v1alpha1 +kind: S3Instance +metadata: + name: s3instance-sample +spec: + s3Provider: minio + url: https://minio.example.com + secretRef: minio-credentials + caCertSecretRef: minio-certificates + # allowedNamespaces: "*" # if not present only resources from the same namespace is allowed + # region: us-east-1 +--- +apiVersion: v1 +kind: Secret +metadata: + name: minio-credentials +type: Opaque +data: + S3_ACCESS_KEY: accessKey + S3_SECRET_KEY: secretkey +--- +apiVersion: v1 +kind: Secret +metadata: + name: s3-default-instance-cert +type: Opaque +stringData: + ca.crt: | + -----BEGIN CERTIFICATE----- + ... + -----END CERTIFICATE----- diff --git a/config/samples/s3.onyxia.sh_v1alpha1_s3user.yaml b/config/samples/s3.onyxia.sh_v1alpha1_s3user.yaml index 24a6458..43ea54d 100644 --- a/config/samples/s3.onyxia.sh_v1alpha1_s3user.yaml +++ b/config/samples/s3.onyxia.sh_v1alpha1_s3user.yaml @@ -1,17 +1,11 @@ -apiVersion: s3.onyxia.sh/v1alpha1 -kind: S3User -metadata: - labels: - app.kubernetes.io/name: user - app.kubernetes.io/instance: user-sample - app.kubernetes.io/part-of: s3-operator - app.kubernetes.io/managed-by: kustomize - app.kubernetes.io/created-by: s3-operator - name: user-sample -spec: - accessKey: user-sample - policies: - - policy-example1 - - policy-example2 - groups: - - group-example1 +apiVersion: s3.onyxia.sh/v1alpha1 +kind: S3User +metadata: + name: user-sample +spec: + accessKey: user-sample + policies: + - policy-example1 + - policy-example2 + groups: + - group-example1 diff --git a/config/scorecard/bases/config.yaml b/config/scorecard/bases/config.yaml index c770478..345daf3 100644 --- a/config/scorecard/bases/config.yaml +++ b/config/scorecard/bases/config.yaml @@ -1,7 +1,7 @@ -apiVersion: scorecard.operatorframework.io/v1alpha3 -kind: Configuration -metadata: - name: config -stages: -- parallel: true - tests: [] +apiVersion: scorecard.operatorframework.io/v1alpha3 +kind: Configuration +metadata: + name: config +stages: +- parallel: true + tests: [] diff --git a/config/scorecard/kustomization.yaml b/config/scorecard/kustomization.yaml index 50cd2d0..ab51ff6 100644 --- a/config/scorecard/kustomization.yaml +++ b/config/scorecard/kustomization.yaml @@ -1,16 +1,16 @@ -resources: -- bases/config.yaml -patchesJson6902: -- path: patches/basic.config.yaml - target: - group: scorecard.operatorframework.io - version: v1alpha3 - kind: Configuration - name: config -- path: patches/olm.config.yaml - target: - group: scorecard.operatorframework.io - version: v1alpha3 - kind: Configuration - name: config -#+kubebuilder:scaffold:patchesJson6902 +resources: +- bases/config.yaml +patchesJson6902: +- path: patches/basic.config.yaml + target: + group: scorecard.operatorframework.io + version: v1alpha3 + kind: Configuration + name: config +- path: patches/olm.config.yaml + target: + group: scorecard.operatorframework.io + 
version: v1alpha3 + kind: Configuration + name: config +#+kubebuilder:scaffold:patchesJson6902 diff --git a/config/scorecard/patches/basic.config.yaml b/config/scorecard/patches/basic.config.yaml index 472a988..0238747 100644 --- a/config/scorecard/patches/basic.config.yaml +++ b/config/scorecard/patches/basic.config.yaml @@ -1,10 +1,10 @@ -- op: add - path: /stages/0/tests/- - value: - entrypoint: - - scorecard-test - - basic-check-spec - image: quay.io/operator-framework/scorecard-test:v1.32.0 - labels: - suite: basic - test: basic-check-spec-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - basic-check-spec + image: quay.io/operator-framework/scorecard-test:v1.32.0 + labels: + suite: basic + test: basic-check-spec-test diff --git a/config/scorecard/patches/olm.config.yaml b/config/scorecard/patches/olm.config.yaml index 343c6d8..182e5fe 100644 --- a/config/scorecard/patches/olm.config.yaml +++ b/config/scorecard/patches/olm.config.yaml @@ -1,50 +1,50 @@ -- op: add - path: /stages/0/tests/- - value: - entrypoint: - - scorecard-test - - olm-bundle-validation - image: quay.io/operator-framework/scorecard-test:v1.32.0 - labels: - suite: olm - test: olm-bundle-validation-test -- op: add - path: /stages/0/tests/- - value: - entrypoint: - - scorecard-test - - olm-crds-have-validation - image: quay.io/operator-framework/scorecard-test:v1.32.0 - labels: - suite: olm - test: olm-crds-have-validation-test -- op: add - path: /stages/0/tests/- - value: - entrypoint: - - scorecard-test - - olm-crds-have-resources - image: quay.io/operator-framework/scorecard-test:v1.32.0 - labels: - suite: olm - test: olm-crds-have-resources-test -- op: add - path: /stages/0/tests/- - value: - entrypoint: - - scorecard-test - - olm-spec-descriptors - image: quay.io/operator-framework/scorecard-test:v1.32.0 - labels: - suite: olm - test: olm-spec-descriptors-test -- op: add - path: /stages/0/tests/- - value: - entrypoint: - - scorecard-test - - olm-status-descriptors - image: quay.io/operator-framework/scorecard-test:v1.32.0 - labels: - suite: olm - test: olm-status-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-bundle-validation + image: quay.io/operator-framework/scorecard-test:v1.32.0 + labels: + suite: olm + test: olm-bundle-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-validation + image: quay.io/operator-framework/scorecard-test:v1.32.0 + labels: + suite: olm + test: olm-crds-have-validation-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-crds-have-resources + image: quay.io/operator-framework/scorecard-test:v1.32.0 + labels: + suite: olm + test: olm-crds-have-resources-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-spec-descriptors + image: quay.io/operator-framework/scorecard-test:v1.32.0 + labels: + suite: olm + test: olm-spec-descriptors-test +- op: add + path: /stages/0/tests/- + value: + entrypoint: + - scorecard-test + - olm-status-descriptors + image: quay.io/operator-framework/scorecard-test:v1.32.0 + labels: + suite: olm + test: olm-status-descriptors-test diff --git a/controllers/bucket_controller.go b/controllers/bucket_controller.go deleted file mode 100644 index f6a88bd..0000000 --- a/controllers/bucket_controller.go +++ /dev/null @@ -1,267 +0,0 @@ -/* -Copyright 2023. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "fmt" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - "github.com/InseeFrLab/s3-operator/controllers/utils" -) - -// BucketReconciler reconciles a Bucket object -type BucketReconciler struct { - client.Client - Scheme *runtime.Scheme - S3Client factory.S3Client - BucketDeletion bool -} - -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets/finalizers,verbs=update - -const bucketFinalizer = "s3.onyxia.sh/finalizer" - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // Checking for bucket resource existence - bucketResource := &s3v1alpha1.Bucket{} - err := r.Get(ctx, req.NamespacedName, bucketResource) - if err != nil { - if errors.IsNotFound(err) { - logger.Info("The Bucket custom resource has been removed ; as such the Bucket controller is NOOP.", "req.Name", req.Name) - return ctrl.Result{}, nil - } - logger.Error(err, "An error occurred when attempting to read the Bucket resource from the Kubernetes cluster") - return ctrl.Result{}, err - } - - // Managing bucket deletion with a finalizer - // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources - isMarkedForDeletion := bucketResource.GetDeletionTimestamp() != nil - if isMarkedForDeletion { - if controllerutil.ContainsFinalizer(bucketResource, bucketFinalizer) { - // Run finalization logic for bucketFinalizer. If the - // finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. 
- if err := r.finalizeBucket(bucketResource); err != nil { - // return ctrl.Result{}, err - logger.Error(err, "an error occurred when attempting to finalize the bucket", "bucket", bucketResource.Spec.Name) - // return ctrl.Result{}, err - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketFinalizeFailed", - fmt.Sprintf("An error occurred when attempting to delete bucket [%s]", bucketResource.Spec.Name), err) - } - - // Remove bucketFinalizer. Once all finalizers have been - // removed, the object will be deleted. - controllerutil.RemoveFinalizer(bucketResource, bucketFinalizer) - err := r.Update(ctx, bucketResource) - if err != nil { - logger.Error(err, "an error occurred when removing finalizer from bucket", "bucket", bucketResource.Spec.Name) - // return ctrl.Result{}, err - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketFinalizerRemovalFailed", - fmt.Sprintf("An error occurred when attempting to remove the finalizer from bucket [%s]", bucketResource.Spec.Name), err) - } - } - return ctrl.Result{}, nil - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(bucketResource, bucketFinalizer) { - controllerutil.AddFinalizer(bucketResource, bucketFinalizer) - err = r.Update(ctx, bucketResource) - if err != nil { - logger.Error(err, "an error occurred when adding finalizer from bucket", "bucket", bucketResource.Spec.Name) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketFinalizerAddFailed", - fmt.Sprintf("An error occurred when attempting to add the finalizer from bucket [%s]", bucketResource.Spec.Name), err) - } - } - - // Bucket lifecycle management (other than deletion) starts here - - // Check bucket existence on the S3 server - found, err := r.S3Client.BucketExists(bucketResource.Spec.Name) - if err != nil { - logger.Error(err, "an error occurred while checking the existence of a bucket", "bucket", bucketResource.Spec.Name) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketExistenceCheckFailed", - fmt.Sprintf("Checking existence of bucket [%s] from S3 instance has failed", bucketResource.Spec.Name), err) - } - - // If the bucket does not exist, it is created based on the CR (with potential quotas and paths) - if !found { - - // Bucket creation - err = r.S3Client.CreateBucket(bucketResource.Spec.Name) - if err != nil { - logger.Error(err, "an error occurred while creating a bucket", "bucket", bucketResource.Spec.Name) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketCreationFailed", - fmt.Sprintf("Creation of bucket [%s] on S3 instance has failed", bucketResource.Spec.Name), err) - } - - // Setting quotas - err = r.S3Client.SetQuota(bucketResource.Spec.Name, bucketResource.Spec.Quota.Default) - if err != nil { - logger.Error(err, "an error occurred while setting a quota on a bucket", "bucket", bucketResource.Spec.Name, "quota", bucketResource.Spec.Quota.Default) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "SetQuotaOnBucketFailed", - fmt.Sprintf("Setting a quota of [%v] on bucket [%s] has failed", bucketResource.Spec.Quota.Default, bucketResource.Spec.Name), err) - } - - // Path creation - for _, v := range bucketResource.Spec.Paths { - err = r.S3Client.CreatePath(bucketResource.Spec.Name, v) - 
if err != nil { - logger.Error(err, "an error occurred while creating a path on a bucket", "bucket", bucketResource.Spec.Name, "path", v) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "CreatingPathOnBucketFailed", - fmt.Sprintf("Creating the path [%s] on bucket [%s] has failed", v, bucketResource.Spec.Name), err) - } - } - - // The bucket creation, quota setting and path creation happened without any error - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorSucceeded", metav1.ConditionTrue, "BucketCreated", - fmt.Sprintf("The bucket [%s] was created with its quota and paths", bucketResource.Spec.Name), nil) - } - - // If the bucket exists on the S3 server, then we need to compare it to - // its corresponding custom resource, and update it in case the CR has changed. - - // Checking effectiveQuota existence on the bucket - effectiveQuota, err := r.S3Client.GetQuota(bucketResource.Spec.Name) - if err != nil { - logger.Error(err, "an error occurred while getting the quota for a bucket", "bucket", bucketResource.Spec.Name) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketQuotaCheckFailed", - fmt.Sprintf("The check for a quota on bucket [%s] has failed", bucketResource.Spec.Name), err) - } - - // If a quota exists, we check it versus the spec of the CR. In case they don't match, - // we reset the quota using the value from CR ("override" is present, "default" if not) - - // Choosing between override / default - quotaToResetTo := bucketResource.Spec.Quota.Override - if quotaToResetTo == 0 { - quotaToResetTo = bucketResource.Spec.Quota.Default - } - - if effectiveQuota != quotaToResetTo { - err = r.S3Client.SetQuota(bucketResource.Spec.Name, quotaToResetTo) - if err != nil { - logger.Error(err, "an error occurred while resetting the quota for a bucket", "bucket", bucketResource.Spec.Name, "quotaToResetTo", quotaToResetTo) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketQuotaUpdateFailed", - fmt.Sprintf("The quota update (%v => %v) on bucket [%s] has failed", effectiveQuota, quotaToResetTo, bucketResource.Spec.Name), err) - } - } - - // For every path on the custom resource's spec, we check the path actually - // exists on the bucket on the S3 server, and create it if it doesn't - // TODO ? : the way this is naively implemented, it's probably costly. Maybe - // we can get the "effectiveBucket" (with its quota and paths) once at the beginning, - // and iterate on this instead of interrogating the S3 server twice for every path. - // But then again, some buckets will likely be filled with many objects outside the - // scope of the CR, so getting all of them might be even more costly. 
- for _, pathInCr := range bucketResource.Spec.Paths { - pathExists, err := r.S3Client.PathExists(bucketResource.Spec.Name, pathInCr) - if err != nil { - logger.Error(err, "an error occurred while checking a path's existence on a bucket", "bucket", bucketResource.Spec.Name, "path", pathInCr) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketPathCheckFailed", - fmt.Sprintf("The check for path [%s] on bucket [%s] has failed", pathInCr, bucketResource.Spec.Name), err) - } - - if !pathExists { - err = r.S3Client.CreatePath(bucketResource.Spec.Name, pathInCr) - if err != nil { - logger.Error(err, "an error occurred while creating a path on a bucket", "bucket", bucketResource.Spec.Name, "path", pathInCr) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorFailed", metav1.ConditionFalse, "BucketPathCreationFailed", - fmt.Sprintf("The creation of path [%s] on bucket [%s] has failed", pathInCr, bucketResource.Spec.Name), err) - } - } - } - - // The bucket reconciliation with its CR was succesful (or NOOP) - return r.SetBucketStatusConditionAndUpdate(ctx, bucketResource, "OperatorSucceeded", metav1.ConditionTrue, "BucketUpdated", - fmt.Sprintf("The bucket [%s] was updated according to its matching custom resource", bucketResource.Spec.Name), nil) - -} - -// SetupWithManager sets up the controller with the Manager.* -func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&s3v1alpha1.Bucket{}). - // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ - WithEventFilter(predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - // Only reconcile if generation has changed - return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Evaluates to false if the object has been confirmed deleted. - return !e.DeleteStateUnknown - }, - }). - WithOptions(controller.Options{MaxConcurrentReconciles: 10}). - Complete(r) -} - -func (r *BucketReconciler) finalizeBucket(bucketResource *s3v1alpha1.Bucket) error { - if r.BucketDeletion { - return r.S3Client.DeleteBucket(bucketResource.Spec.Name) - } - return nil -} - -func (r *BucketReconciler) SetBucketStatusConditionAndUpdate(ctx context.Context, bucketResource *s3v1alpha1.Bucket, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // We moved away from meta.SetStatusCondition, as the implementation did not allow for updating - // lastTransitionTime if a Condition (as identified by Reason instead of Type) was previously - // obtained and updated to again. 
- bucketResource.Status.Conditions = utils.UpdateConditions(bucketResource.Status.Conditions, metav1.Condition{ - Type: conditionType, - Status: status, - Reason: reason, - LastTransitionTime: metav1.NewTime(time.Now()), - Message: message, - ObservedGeneration: bucketResource.GetGeneration(), - }) - - err := r.Status().Update(ctx, bucketResource) - if err != nil { - logger.Error(err, "an error occurred while updating the status of the bucket resource") - return ctrl.Result{}, utilerrors.NewAggregate([]error{err, srcError}) - } - return ctrl.Result{}, srcError -} diff --git a/controllers/path_controller.go b/controllers/path_controller.go deleted file mode 100644 index d789d5b..0000000 --- a/controllers/path_controller.go +++ /dev/null @@ -1,233 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "context" - "fmt" - "time" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - "github.com/InseeFrLab/s3-operator/controllers/utils" -) - -// PathReconciler reconciles a Path object -type PathReconciler struct { - client.Client - Scheme *runtime.Scheme - S3Client factory.S3Client - PathDeletion bool -} - -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths/finalizers,verbs=update - -const pathFinalizer = "s3.onyxia.sh/finalizer" - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *PathReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // Checking for path resource existence - pathResource := &s3v1alpha1.Path{} - err := r.Get(ctx, req.NamespacedName, pathResource) - if err != nil { - if errors.IsNotFound(err) { - logger.Info("The Path custom resource has been removed ; as such the Path controller is NOOP.", "req.Name", req.Name) - return ctrl.Result{}, nil - } - logger.Error(err, "An error occurred when attempting to read the Path resource from the Kubernetes cluster") - return ctrl.Result{}, err - } - - // Managing path deletion with a finalizer - // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources - isMarkedForDeletion := pathResource.GetDeletionTimestamp() != nil - if isMarkedForDeletion { - if controllerutil.ContainsFinalizer(pathResource, pathFinalizer) { - // Run finalization logic for pathFinalizer. If the - // finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. - if err := r.finalizePath(pathResource); err != nil { - // return ctrl.Result{}, err - logger.Error(err, "an error occurred when attempting to finalize the path", "path", pathResource.Name) - // return ctrl.Result{}, err - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathFinalizeFailed", - fmt.Sprintf("An error occurred when attempting to delete path [%s]", pathResource.Name), err) - } - - // Remove pathFinalizer. Once all finalizers have been - // removed, the object will be deleted. 
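Aside: the add-finalizer / finalize / remove-finalizer sequence above is repeated almost verbatim in the bucket, path, policy and user controllers. A minimal generic sketch of that pattern, with an illustrative helper name that is not part of the operator:

```go
// Illustrative sketch only: a generic helper capturing the finalizer pattern
// repeated across the controllers removed in this PR.
package sketch

import (
	"context"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

const finalizerName = "s3.onyxia.sh/finalizer"

// reconcileWithFinalizer adds the finalizer on live objects; on deletion it runs
// finalize() before removing the finalizer so Kubernetes can garbage-collect the object.
func reconcileWithFinalizer(ctx context.Context, c client.Client, obj client.Object, finalize func() error) (ctrl.Result, error) {
	if obj.GetDeletionTimestamp() != nil {
		if controllerutil.ContainsFinalizer(obj, finalizerName) {
			if err := finalize(); err != nil {
				// Keep the finalizer so deletion is retried on the next reconcile.
				return ctrl.Result{}, err
			}
			controllerutil.RemoveFinalizer(obj, finalizerName)
			return ctrl.Result{}, c.Update(ctx, obj)
		}
		return ctrl.Result{}, nil
	}

	// AddFinalizer reports whether it actually mutated the object,
	// so the extra Update call can be skipped when nothing changed.
	if controllerutil.AddFinalizer(obj, finalizerName) {
		return ctrl.Result{}, c.Update(ctx, obj)
	}
	return ctrl.Result{}, nil
}
```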
- controllerutil.RemoveFinalizer(pathResource, pathFinalizer) - err := r.Update(ctx, pathResource) - if err != nil { - logger.Error(err, "an error occurred when removing finalizer from path", "path", pathResource.Name) - // return ctrl.Result{}, err - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathFinalizerRemovalFailed", - fmt.Sprintf("An error occurred when attempting to remove the finalizer from path [%s]", pathResource.Name), err) - } - } - return ctrl.Result{}, nil - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(pathResource, pathFinalizer) { - controllerutil.AddFinalizer(pathResource, pathFinalizer) - err = r.Update(ctx, pathResource) - if err != nil { - logger.Error(err, "an error occurred when adding finalizer from path", "path", pathResource.Name) - // return ctrl.Result{}, err - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathFinalizerAddFailed", - fmt.Sprintf("An error occurred when attempting to add the finalizer from path [%s]", pathResource.Name), err) - } - } - - // Path lifecycle management (other than deletion) starts here - - // Check bucket existence on the S3 server - bucketFound, err := r.S3Client.BucketExists(pathResource.Spec.BucketName) - if err != nil { - logger.Error(err, "an error occurred while checking the existence of a bucket", "bucket", pathResource.Spec.BucketName) - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "BucketExistenceCheckFailed", - fmt.Sprintf("Checking existence of bucket [%s] from S3 instance has failed", pathResource.Spec.BucketName), err) - } - - // If bucket does not exist, the Path CR should be in a failing state - if !bucketFound { - errorBucketNotFound := fmt.Errorf("the path CR %s references a non-existing bucket : %s", pathResource.Name, pathResource.Spec.BucketName) - logger.Error(errorBucketNotFound, errorBucketNotFound.Error()) - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "ReferencingNonExistingBucket", - fmt.Sprintf("The Path CR [%s] references a non-existing bucket [%s]", pathResource.Name, pathResource.Spec.BucketName), errorBucketNotFound) - } - - // If the bucket exists, proceed to create or recreate the referenced paths - // For every path on the custom resource's spec, we check the path actually - // exists on the bucket on the S3 server, and create it if it doesn't - // TODO ? : the way this is naively implemented, it's probably costly. Maybe - // we can get the "effectiveBucket" (with its quota and paths) once at the beginning, - // and iterate on this instead of interrogating the S3 server twice for every path. - // But then again, some buckets will likely be filled with many objects outside the - // scope of the CR, so getting all of them might be even more costly. 
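Aside on the TODO just above: if the underlying client is minio-go (an assumption here; the operator's own `factory.S3Client` interface is not used in this sketch), the "get the effective bucket once and compare locally" idea could look like the following. As the TODO itself notes, a full listing can be costly on buckets with many objects outside the CR's scope, so this is a trade-off rather than a fix:

```go
// Rough sketch of the "list once, diff locally" approach, assuming minio-go.
package sketch

import (
	"context"
	"strings"

	"github.com/minio/minio-go/v7"
)

// missingPaths returns the desired prefixes for which no object currently exists,
// using a single recursive listing instead of one existence call per path.
func missingPaths(ctx context.Context, mc *minio.Client, bucket string, desired []string) ([]string, error) {
	var keys []string
	for obj := range mc.ListObjects(ctx, bucket, minio.ListObjectsOptions{Recursive: true}) {
		if obj.Err != nil {
			return nil, obj.Err
		}
		keys = append(keys, obj.Key)
	}

	var missing []string
	for _, p := range desired {
		prefix := strings.Trim(p, "/") + "/"
		found := false
		for _, k := range keys {
			if strings.HasPrefix(k, prefix) {
				found = true
				break
			}
		}
		if !found {
			missing = append(missing, p)
		}
	}
	return missing, nil
}
```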
- for _, pathInCr := range pathResource.Spec.Paths { - pathExists, err := r.S3Client.PathExists(pathResource.Spec.BucketName, pathInCr) - if err != nil { - logger.Error(err, "an error occurred while checking a path's existence on a bucket", "bucket", pathResource.Spec.BucketName, "path", pathInCr) - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathCheckFailed", - fmt.Sprintf("The check for path [%s] on bucket [%s] has failed", pathInCr, pathResource.Spec.BucketName), err) - } - - if !pathExists { - err = r.S3Client.CreatePath(pathResource.Spec.BucketName, pathInCr) - if err != nil { - logger.Error(err, "an error occurred while creating a path on a bucket", "bucket", pathResource.Spec.BucketName, "path", pathInCr) - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorFailed", metav1.ConditionFalse, "PathCreationFailed", - fmt.Sprintf("The creation of path [%s] on bucket [%s] has failed", pathInCr, pathResource.Spec.BucketName), err) - } - } - } - - // The bucket reconciliation with its CR was succesful (or NOOP) - return r.SetPathStatusConditionAndUpdate(ctx, pathResource, "OperatorSucceeded", metav1.ConditionTrue, "PathsCreated", - fmt.Sprintf("The paths were created according to the specs of the [%s] CR", pathResource.Name), nil) - -} - -// SetupWithManager sets up the controller with the Manager. -func (r *PathReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&s3v1alpha1.Path{}). - // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ - WithEventFilter(predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - // Only reconcile if generation has changed - return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Evaluates to false if the object has been confirmed deleted. - return !e.DeleteStateUnknown - }, - }). - WithOptions(controller.Options{MaxConcurrentReconciles: 10}). - Complete(r) -} - -func (r *PathReconciler) finalizePath(pathResource *s3v1alpha1.Path) error { - logger := log.Log.WithValues("controller", "path") - if r.PathDeletion { - var failedPaths []string = make([]string, 0) - for _, path := range pathResource.Spec.Paths { - - pathExists, err := r.S3Client.PathExists(pathResource.Spec.BucketName, path) - if err != nil { - logger.Error(err, "finalize : an error occurred while checking a path's existence on a bucket", "bucket", pathResource.Spec.BucketName, "path", path) - } - - if pathExists { - err = r.S3Client.DeletePath(pathResource.Spec.BucketName, path) - if err != nil { - failedPaths = append(failedPaths, path) - } - } - } - - if len(failedPaths) > 0 { - return fmt.Errorf("at least one path couldn't be removed from S3 backend %+q", failedPaths) - } - } - return nil -} - -func (r *PathReconciler) SetPathStatusConditionAndUpdate(ctx context.Context, pathResource *s3v1alpha1.Path, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // We moved away from meta.SetStatusCondition, as the implementation did not allow for updating - // lastTransitionTime if a Condition (as identified by Reason instead of Type) was previously - // obtained and updated to again. 
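Aside on the `WithEventFilter` block in `SetupWithManager` above: controller-runtime ships a built-in predicate with the same "only reconcile when the generation changes" behaviour. It does not cover the `DeleteStateUnknown` filtering, so the two can be combined; a sketch for reference, not part of this PR:

```go
// Sketch: built-in GenerationChangedPredicate combined with the custom DeleteFunc,
// equivalent to the hand-rolled predicate.Funcs used by these controllers.
package sketch

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/controller"
	"sigs.k8s.io/controller-runtime/pkg/event"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1"
)

func setupPathController(mgr ctrl.Manager, r reconcile.Reconciler) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&s3v1alpha1.Path{}).
		WithEventFilter(predicate.And(
			// Skips update events where metadata.generation did not change.
			predicate.GenerationChangedPredicate{},
			predicate.Funcs{
				// Ignore delete events only when the deletion is confirmed.
				DeleteFunc: func(e event.DeleteEvent) bool { return !e.DeleteStateUnknown },
			},
		)).
		WithOptions(controller.Options{MaxConcurrentReconciles: 10}).
		Complete(r)
}
```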
- pathResource.Status.Conditions = utils.UpdateConditions(pathResource.Status.Conditions, metav1.Condition{ - Type: conditionType, - Status: status, - Reason: reason, - LastTransitionTime: metav1.NewTime(time.Now()), - Message: message, - ObservedGeneration: pathResource.GetGeneration(), - }) - - err := r.Status().Update(ctx, pathResource) - if err != nil { - logger.Error(err, "an error occurred while updating the status of the path resource") - return ctrl.Result{}, utilerrors.NewAggregate([]error{err, srcError}) - } - return ctrl.Result{}, srcError -} diff --git a/controllers/policy_controller.go b/controllers/policy_controller.go deleted file mode 100644 index 1dd597a..0000000 --- a/controllers/policy_controller.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package controllers - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "time" - - "github.com/minio/madmin-go/v3" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - - s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - "github.com/InseeFrLab/s3-operator/controllers/utils" -) - -// PolicyReconciler reconciles a Policy object -type PolicyReconciler struct { - client.Client - Scheme *runtime.Scheme - S3Client factory.S3Client - PolicyDeletion bool -} - -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies/finalizers,verbs=update - -const policyFinalizer = "s3.onyxia.sh/finalizer" - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. 
-// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *PolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // Checking for policy resource existence - policyResource := &s3v1alpha1.Policy{} - err := r.Get(ctx, req.NamespacedName, policyResource) - if err != nil { - if errors.IsNotFound(err) { - logger.Info("The Policy custom resource has been removed ; as such the Policy controller is NOOP.", "req.Name", req.Name) - return ctrl.Result{}, nil - } - logger.Error(err, "An error occurred when attempting to read the Policy resource from the Kubernetes cluster") - return ctrl.Result{}, err - } - - // Managing policy deletion with a finalizer - // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources - isMarkedForDeletion := policyResource.GetDeletionTimestamp() != nil - if isMarkedForDeletion { - if controllerutil.ContainsFinalizer(policyResource, policyFinalizer) { - // Run finalization logic for policyFinalizer. If the - // finalization logic fails, don't remove the finalizer so - // that we can retry during the next reconciliation. - if err := r.finalizePolicy(policyResource); err != nil { - // return ctrl.Result{}, err - logger.Error(err, "an error occurred when attempting to finalize the policy", "policy", policyResource.Spec.Name) - // return ctrl.Result{}, err - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyFinalizeFailed", - fmt.Sprintf("An error occurred when attempting to delete policy [%s]", policyResource.Spec.Name), err) - } - - // Remove policyFinalizer. Once all finalizers have been - // removed, the object will be deleted. - controllerutil.RemoveFinalizer(policyResource, policyFinalizer) - err := r.Update(ctx, policyResource) - if err != nil { - logger.Error(err, "an error occurred when removing finalizer from policy", "policy", policyResource.Spec.Name) - // return ctrl.Result{}, err - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyFinalizerRemovalFailed", - fmt.Sprintf("An error occurred when attempting to remove the finalizer from policy [%s]", policyResource.Spec.Name), err) - } - } - return ctrl.Result{}, nil - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(policyResource, policyFinalizer) { - controllerutil.AddFinalizer(policyResource, policyFinalizer) - err = r.Update(ctx, policyResource) - if err != nil { - logger.Error(err, "an error occurred when adding finalizer from policy", "policy", policyResource.Spec.Name) - // return ctrl.Result{}, err - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyFinalizerAddFailed", - fmt.Sprintf("An error occurred when attempting to add the finalizer from policy [%s]", policyResource.Spec.Name), err) - } - } - - // Policy lifecycle management (other than deletion) starts here - - // Check policy existence on the S3 server - effectivePolicy, err := r.S3Client.GetPolicyInfo(policyResource.Spec.Name) - - // If the policy does not exist on S3... 
- if err != nil { - logger.Error(err, "an error occurred while checking the existence of a policy", "policy", policyResource.Spec.Name) - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyInfoFailed", - fmt.Sprintf("Obtaining policy[%s] info from S3 instance has failed", policyResource.Spec.Name), err) - } - - if effectivePolicy == nil { - - // Policy creation using info from the CR - err = r.S3Client.CreateOrUpdatePolicy(policyResource.Spec.Name, policyResource.Spec.PolicyContent) - if err != nil { - logger.Error(err, "an error occurred while creating the policy", "policy", policyResource.Spec.Name) - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyCreationFailed", - fmt.Sprintf("The creation of policy [%s] has failed", policyResource.Spec.Name), err) - } - - // Update status to reflect policy creation - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorSucceeded", metav1.ConditionTrue, "PolicyCreated", - fmt.Sprintf("The creation of policy [%s] has succeeded", policyResource.Spec.Name), nil) - - } - - // If the policy exists on S3, we compare its state to the custom resource that spawned it on K8S - matching, err := IsPolicyMatchingWithCustomResource(policyResource, effectivePolicy) - if err != nil { - logger.Error(err, "an error occurred while comparing actual and expected configuration for the policy", "policy", policyResource.Spec.Name) - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyComparisonFailed", - fmt.Sprintf("The comparison between the effective policy [%s] on S3 and its corresponding custom resource on K8S has failed", policyResource.Spec.Name), err) - } - // If the two match, no reconciliation is needed, but we still need to update - // the status, in case the generation changed (eg : rollback to previous state after a problematic change) - if matching { - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorSucceeded", metav1.ConditionTrue, "PolicyUnchanged", - fmt.Sprintf("The policy [%s] matches its corresponding custom resource", policyResource.Spec.Name), nil) - } - - // If not we update the policy to match the CR - err = r.S3Client.CreateOrUpdatePolicy(policyResource.Spec.Name, policyResource.Spec.PolicyContent) - if err != nil { - logger.Error(err, "an error occurred while updating the policy", "policy", policyResource.Spec.Name) - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorFailed", metav1.ConditionFalse, "PolicyUpdateFailed", - fmt.Sprintf("The update of effective policy [%s] on S3 to match its corresponding custom resource on K8S has failed", policyResource.Spec.Name), err) - } - - // Update status to reflect policy update - return r.SetPolicyStatusConditionAndUpdate(ctx, policyResource, "OperatorSucceeded", metav1.ConditionTrue, "PolicyUpdated", - fmt.Sprintf("The policy [%s] was updated according to its matching custom resource", policyResource.Spec.Name), nil) -} - -// SetupWithManager sets up the controller with the Manager. -func (r *PolicyReconciler) SetupWithManager(mgr ctrl.Manager) error { - return ctrl.NewControllerManagedBy(mgr). - For(&s3v1alpha1.Policy{}). 
- // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ - WithEventFilter(predicate.Funcs{ - UpdateFunc: func(e event.UpdateEvent) bool { - // Only reconcile if generation has changed - return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Evaluates to false if the object has been confirmed deleted. - return !e.DeleteStateUnknown - }, - }). - WithOptions(controller.Options{MaxConcurrentReconciles: 10}). - Complete(r) -} - -func IsPolicyMatchingWithCustomResource(policyResource *s3v1alpha1.Policy, effectivePolicy *madmin.PolicyInfo) (bool, error) { - // The policy content visible in the custom resource usually contains indentations and newlines - // while the one we get from S3 is compacted. In order to compare them, we compact the former. - policyResourceAsByteSlice := []byte(policyResource.Spec.PolicyContent) - buffer := new(bytes.Buffer) - err := json.Compact(buffer, policyResourceAsByteSlice) - if err != nil { - return false, err - } - - // Another gotcha is that the effective policy comes up as a json.RawContent, - // which needs marshalling in order to be properly compared to the []byte we get from the CR. - marshalled, err := json.Marshal(effectivePolicy.Policy) - if err != nil { - return false, err - } - return bytes.Equal(buffer.Bytes(), marshalled), nil -} - -func (r *PolicyReconciler) finalizePolicy(policyResource *s3v1alpha1.Policy) error { - if r.PolicyDeletion { - return r.S3Client.DeletePolicy(policyResource.Spec.Name) - } - return nil -} - -func (r *PolicyReconciler) SetPolicyStatusConditionAndUpdate(ctx context.Context, policyResource *s3v1alpha1.Policy, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // We moved away from meta.SetStatusCondition, as the implementation did not allow for updating - // lastTransitionTime if a Condition (as identified by Reason instead of Type) was previously - // obtained and updated to again. - policyResource.Status.Conditions = utils.UpdateConditions(policyResource.Status.Conditions, metav1.Condition{ - Type: conditionType, - Status: status, - Reason: reason, - LastTransitionTime: metav1.NewTime(time.Now()), - Message: message, - ObservedGeneration: policyResource.GetGeneration(), - }) - - err := r.Status().Update(ctx, policyResource) - if err != nil { - logger.Error(err, "an error occurred while updating the status of the policy resource") - return ctrl.Result{}, utilerrors.NewAggregate([]error{err, srcError}) - } - return ctrl.Result{}, srcError -} diff --git a/controllers/user_controller.go b/controllers/user_controller.go deleted file mode 100644 index 204b6a2..0000000 --- a/controllers/user_controller.go +++ /dev/null @@ -1,559 +0,0 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controllers - -import ( - "cmp" - "context" - "fmt" - "slices" - "time" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - "sigs.k8s.io/controller-runtime/pkg/event" - "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/predicate" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - - s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" - "github.com/InseeFrLab/s3-operator/controllers/s3/factory" - utils "github.com/InseeFrLab/s3-operator/controllers/utils" - password "github.com/InseeFrLab/s3-operator/controllers/utils/password" -) - -// S3UserReconciler reconciles a S3User object -type S3UserReconciler struct { - client.Client - Scheme *runtime.Scheme - S3Client factory.S3Client - S3UserDeletion bool - OverrideExistingSecret bool -} - -const ( - userFinalizer = "s3.onyxia.sh/userFinalizer" -) - -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=S3User,verbs=get;list;watch;create;update;patch;delete -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=S3User/status,verbs=get;update;patch -//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=S3User/finalizers,verbs=update - -// Reconcile is part of the main kubernetes reconciliation loop which aims to -// move the current state of the cluster closer to the desired state. -// -// For more details, check Reconcile and its Result here: -// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile -func (r *S3UserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // Checking for userResource existence - userResource := &s3v1alpha1.S3User{} - err := r.Get(ctx, req.NamespacedName, userResource) - if err != nil { - if errors.IsNotFound(err) { - logger.Info(fmt.Sprintf("The S3User CR %s (or its owned Secret) has been removed. NOOP", req.Name)) - return ctrl.Result{}, nil - } - logger.Error(err, "An error occurred when fetching the S3User from Kubernetes") - return ctrl.Result{}, err - } - - // Check if the userResource instance is marked to be deleted, which is - // indicated by the deletion timestamp being set. The object will be deleted. 
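Aside, looking back at `IsPolicyMatchingWithCustomResource` in the policy controller above: comparing the `json.Compact` output byte-for-byte is whitespace-insensitive but still key-order-sensitive, so two semantically identical policy documents can compare as different and trigger a needless `CreateOrUpdatePolicy`. A hedged alternative (illustrative function name, not part of this PR) that compares the unmarshalled structures instead:

```go
// Sketch: order-insensitive comparison between the CR's policy document and the
// effective policy returned by the S3 backend.
package sketch

import (
	"encoding/json"
	"reflect"
)

// policiesSemanticallyEqual unmarshals both documents and compares the resulting
// structures, so whitespace and key-order differences no longer count as drift.
func policiesSemanticallyEqual(crPolicy string, effectivePolicy json.RawMessage) (bool, error) {
	var want, got interface{}
	if err := json.Unmarshal([]byte(crPolicy), &want); err != nil {
		return false, err
	}
	if err := json.Unmarshal(effectivePolicy, &got); err != nil {
		return false, err
	}
	return reflect.DeepEqual(want, got), nil
}
```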
- if userResource.GetDeletionTimestamp() != nil { - logger.Info("userResource have been marked for deletion") - return r.handleS3UserDeletion(ctx, userResource) - } - - // Add finalizer for this CR - if !controllerutil.ContainsFinalizer(userResource, userFinalizer) { - logger.Info("adding finalizer to user") - - controllerutil.AddFinalizer(userResource, userFinalizer) - err = r.Update(ctx, userResource) - if err != nil { - logger.Error(err, "an error occurred when adding finalizer from user", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserFinalizerAddFailed", - fmt.Sprintf("An error occurred when attempting to add the finalizer from user %s", userResource.Name), err) - } - } - - // Check user existence on the S3 server - found, err := r.S3Client.UserExist(userResource.Spec.AccessKey) - if err != nil { - logger.Error(err, "an error occurred while checking the existence of a user", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserExistenceCheckFailed", - fmt.Sprintf("The check for user %s's existence on the S3 backend has failed", userResource.Name), err) - } - - // If the user does not exist, it is created based on the CR - if !found { - logger.Info("this user doesn't exist on the S3 backend and will be created", "accessKey", userResource.Spec.AccessKey) - return r.handleS3NewUser(ctx, userResource) - } - logger.Info("this user already exists on the S3 backend and will be reconciled", "accessKey", userResource.Spec.AccessKey) - return r.handleS3ExistingUser(ctx, userResource) - -} - -func (r *S3UserReconciler) handleS3ExistingUser(ctx context.Context, userResource *s3v1alpha1.S3User) (reconcile.Result, error) { - logger := log.FromContext(ctx) - - // --- Begin Secret management section - - userOwnedSecret, err := r.getUserSecret(ctx, userResource) - if err != nil { - if err.Error() == "SecretListingFailed" { - logger.Error(err, "An error occurred when trying to obtain the user's secret. The user will be deleted from S3 backend and recreated with a secret.") - - r.deleteSecret(ctx, &userOwnedSecret) - err = r.S3Client.DeleteUser(userResource.Spec.AccessKey) - if err != nil { - logger.Error(err, "Could not delete user on S3 server", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserDeletionFailed", - fmt.Sprintf("Deletion of S3user %s on S3 server has failed", userResource.Name), err) - } - return r.handleS3NewUser(ctx, userResource) - } else if err.Error() == "S3UserSecretNameMismatch" { - logger.Info("A secret with owner reference to the user was found, but its name doesn't match the spec. This is probably due to the S3User's spec changing (specifically spec.secretName being added, changed or removed). 
The \"old\" secret will be deleted.") - r.deleteSecret(ctx, &userOwnedSecret) - } - } - - if userOwnedSecret.Name == "" { - logger.Info("Secret associated to user not found, user will be deleted from the S3 backend, then recreated with a secret") - err = r.S3Client.DeleteUser(userResource.Spec.AccessKey) - if err != nil { - logger.Error(err, "Could not delete user on S3 server", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserDeletionFailed", - fmt.Sprintf("Deletion of S3User %s on S3 server has failed", userResource.Name), err) - } - return r.handleS3NewUser(ctx, userResource) - } - - // If a matching secret is found, then we check if it is still valid, as in : do the credentials it - // contains still allow authenticating the S3User on the backend ? If not, the user is deleted and recreated. - // credentialsValid, err := r.S3Client.CheckUserCredentialsValid(userResource.Name, userResource.Spec.AccessKey, string(userOwnedSecret.Data["secretKey"])) - credentialsValid, err := r.S3Client.CheckUserCredentialsValid(userResource.Name, string(userOwnedSecret.Data["accessKey"]), string(userOwnedSecret.Data["secretKey"])) - if err != nil { - logger.Error(err, "An error occurred when checking if user credentials were valid", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserCredentialsCheckFailed", - fmt.Sprintf("Checking the S3User %s's credentials on S3 server has failed", userResource.Name), err) - } - - if !credentialsValid { - logger.Info("The secret containing the credentials will be deleted, and the user will be deleted from the S3 backend, then recreated (through another reconcile)") - r.deleteSecret(ctx, &userOwnedSecret) - err = r.S3Client.DeleteUser(userResource.Spec.AccessKey) - if err != nil { - logger.Error(err, "Could not delete user on S3 server", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserDeletionFailed", - fmt.Sprintf("Deletion of S3user %s on S3 server has failed", userResource.Name), err) - } - - return r.handleS3NewUser(ctx, userResource) - - } - - // --- End Secret management section - - logger.Info("Checking user policies") - userPolicies, err := r.S3Client.GetUserPolicies(userResource.Spec.AccessKey) - if err != nil { - logger.Error(err, "Could not check the user's policies") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyCheckFailed", - fmt.Sprintf("Checking the S3user %s's policies has failed", userResource.Name), err) - } - - policyToDelete := []string{} - policyToAdd := []string{} - for _, policy := range userPolicies { - policyFound := slices.Contains(userResource.Spec.Policies, policy) - if !policyFound { - logger.Info(fmt.Sprintf("S3User policy definition doesn't contain policy %s", policy)) - policyToDelete = append(policyToDelete, policy) - } - } - - for _, policy := range userResource.Spec.Policies { - policyFound := slices.Contains(userPolicies, policy) - if !policyFound { - logger.Info(fmt.Sprintf("S3User policy definition must contain policy %s", policy)) - policyToAdd = append(policyToAdd, policy) - } - } - - if len(policyToDelete) > 0 { - err = r.S3Client.RemovePoliciesFromUser(userResource.Spec.AccessKey, policyToDelete) - if err != nil { - logger.Error(err, "an error occurred while removing policy to user", "user", 
userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyAppendFailed", - fmt.Sprintf("Error while updating policies of user %s on S3 backend has failed", userResource.Name), err) - } - } - - if len(policyToAdd) > 0 { - err := r.S3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policyToAdd) - if err != nil { - logger.Error(err, "an error occurred while adding policy to user", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyAppendFailed", - fmt.Sprintf("Error while updating policies of user %s on S3 backend has failed", userResource.Name), err) - } - } - - logger.Info("User was reconciled without error") - - // Re-fetch the S3User to ensure we have the latest state after updating the secret - // This is necessary at least when creating a user with secretName targetting a pre-existing secret - // that has proper form (data.accessKey and data.secretKey) but isn't owned by any other s3user - if err := r.Get(ctx, types.NamespacedName{Name: userResource.Name, Namespace: userResource.Namespace}, userResource); err != nil { - logger.Error(err, "Failed to re-fetch S3User") - return ctrl.Result{}, err - } - - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorSucceeded", metav1.ConditionTrue, "S3UserUpdated", - fmt.Sprintf("The user %s was updated according to its matching custom resource", userResource.Name), nil) -} - -func (r *S3UserReconciler) handleS3NewUser(ctx context.Context, userResource *s3v1alpha1.S3User) (reconcile.Result, error) { - logger := log.FromContext(ctx) - - // Generating a random secret key - secretKey, err := password.Generate(20, true, false, true) - if err != nil { - logger.Error(err, fmt.Sprintf("Fail to generate password for user %s", userResource.Name)) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserGeneratePasswordFailed", - fmt.Sprintf("An error occurred when attempting to generate password for user %s", userResource.Name), err) - } - - // Create a new K8S Secret to hold the user's accessKey and secretKey - secret, err := r.newSecretForCR(ctx, userResource, map[string][]byte{"accessKey": []byte(userResource.Spec.AccessKey), "secretKey": []byte(secretKey)}) - if err != nil { - // Error while creating the Kubernetes secret - requeue the request. 
- logger.Error(err, "Could not generate Kubernetes secret") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "SecretGenerationFailed", - fmt.Sprintf("The generation of the k8s Secret %s has failed", userResource.Name), err) - } - - // For managing user creation, we first check if a Secret matching - // the user's spec (not matching the owner reference) exists - existingK8sSecret := &corev1.Secret{} - err = r.Get(ctx, types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}, existingK8sSecret) - - // If none exist : we create the user, then the secret - if err != nil && errors.IsNotFound(err) { - logger.Info("No secret found ; creating a new Secret", "Secret.Namespace", secret.Namespace, "Secret.Name", secret.Name) - - // Creating the user - err = r.S3Client.CreateUser(userResource.Spec.AccessKey, secretKey) - - if err != nil { - logger.Error(err, "an error occurred while creating user on S3 server", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserCreationFailed", - fmt.Sprintf("Creation of user %s on S3 instance has failed", userResource.Name), err) - } - - // Creating the secret - logger.Info("Creating a new secret to store the user's credentials", "secretName", secret.Name) - err = r.Create(ctx, secret) - if err != nil { - logger.Error(err, "Could not create secret") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserSecretCreationFailed", - fmt.Sprintf("The creation of the k8s Secret %s has failed", secret.Name), err) - } - - // Add policies - err = r.addPoliciesToUser(ctx, userResource) - if err != nil { - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyAppendFailed", - fmt.Sprintf("Error while updating policies of user %s on S3 instance has failed", userResource.Name), err) - } - - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorSucceeded", metav1.ConditionTrue, "S3UserCreatedWithNewSecret", - fmt.Sprintf("The S3User %s and the Secret %s were created successfully", userResource.Name, secret.Name), nil) - - } else if err != nil { - logger.Error(err, "Couldn't check secret existence") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "SecretExistenceCheckFailedDuringS3UserCreation", - fmt.Sprintf("The check for an existing secret %s during the creation of the S3User %s has failed", secret.Name, userResource.Name), err) - - } else { - // If a secret already exists, but has a different S3User owner reference, then the creation should - // fail with no requeue, and use the status to inform that the spec should be changed - for _, ref := range existingK8sSecret.OwnerReferences { - if ref.Kind == "S3User" { - if ref.UID != userResource.UID { - logger.Error(fmt.Errorf(""), "The secret matching the new S3User's spec is owned by a different S3User.", "conflictingUser", ref.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserCreationFailedAsSecretIsOwnedByAnotherS3User", - fmt.Sprintf("The secret matching the new S3User's spec is owned by a different, pre-existing S3User (%s). 
The S3User being created now (%s) won't be created on the S3 backend until its spec changes to target a different secret", ref.Name, userResource.Name), nil) - } - } - } - - if r.OverrideExistingSecret { - // Case 3.2 : they are not valid, but the operator is configured to overwrite it - logger.Info(fmt.Sprintf("A secret with the name %s already exists ; it will be overwritten as per operator configuration", secret.Name)) - - // Creating the user - err = r.S3Client.CreateUser(userResource.Spec.AccessKey, secretKey) - - if err != nil { - logger.Error(err, "an error occurred while creating user on S3 server", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserCreationFailed", - fmt.Sprintf("Creation of user %s on S3 instance has failed", userResource.Name), err) - } - - // Updating the secret - logger.Info("Updating the pre-existing secret with new credentials", "secretName", secret.Name) - err = r.Update(ctx, secret) - if err != nil { - logger.Error(err, "Could not update secret") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "SecretUpdateFailed", - fmt.Sprintf("The update of the k8s Secret %s has failed", secret.Name), err) - } - - // Add policies - err = r.addPoliciesToUser(ctx, userResource) - if err != nil { - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserPolicyAppendFailed", - fmt.Sprintf("Error while updating policies of user %s on S3 instance has failed", userResource.Name), err) - } - - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorSucceeded", metav1.ConditionTrue, "S3UserCreatedWithWithOverridenSecret", - fmt.Sprintf("The S3User %s was created and the Secret %s was updated successfully", userResource.Name, secret.Name), nil) - } - - // Case 3.3 : they are not valid, and the operator is configured keep the existing secret - // The user will not be created, with no requeue and with two possible ways out : either toggle - // OverrideExistingSecret on, or delete the S3User whose credentials are not working anyway. - logger.Error(nil, fmt.Sprintf("A secret with the name %s already exists ; as the operator is configured to NOT override any pre-existing secrets, this user (%s) not be created on S3 backend until spec change (to target new secret), or until the operator configuration is changed to override existing secrets", secret.Name, userResource.Name)) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorSucceeded", metav1.ConditionTrue, "S3UserCreationFailedAsSecretCannotBeOverwritten", - fmt.Sprintf("The S3User %s wasn't created, as its spec targets a secret (%s) containing invalid credentials. 
The user's spec should be changed to target a different secret.", userResource.Name, secret.Name), nil) - - } -} - -func (r *S3UserReconciler) addPoliciesToUser(ctx context.Context, userResource *s3v1alpha1.S3User) error { - logger := log.FromContext(ctx) - policies := userResource.Spec.Policies - if policies != nil { - err := r.S3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policies) - if err != nil { - logger.Error(err, "an error occurred while adding policy to user", "user", userResource.Name) - return err - } - } - return nil -} - -func (r *S3UserReconciler) handleS3UserDeletion(ctx context.Context, userResource *s3v1alpha1.S3User) (reconcile.Result, error) { - logger := log.FromContext(ctx) - - if controllerutil.ContainsFinalizer(userResource, userFinalizer) { - // Run finalization logic for S3UserFinalizer. If the finalization logic fails, don't remove the finalizer so that we can retry during the next reconciliation. - if err := r.finalizeS3User(userResource); err != nil { - logger.Error(err, "an error occurred when attempting to finalize the user", "user", userResource.Name) - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserFinalizeFailed", - fmt.Sprintf("An error occurred when attempting to delete user %s", userResource.Name), err) - } - - //Remove userFinalizer. Once all finalizers have been removed, the object will be deleted. - controllerutil.RemoveFinalizer(userResource, userFinalizer) - // Unsure why the behavior is different to that of bucket/policy/path controllers, but it appears - // calling r.Update() for adding/removal of finalizer is not necessary (an update event is generated - // with the call to AddFinalizer/RemoveFinalizer), and worse, causes "freshness" problem (with the - // "the object has been modified; please apply your changes to the latest version and try again" error) - err := r.Update(ctx, userResource) - if err != nil { - logger.Error(err, "Failed to remove finalizer.") - return r.setS3UserStatusConditionAndUpdate(ctx, userResource, "OperatorFailed", metav1.ConditionFalse, "S3UserFinalizerRemovalFailed", - fmt.Sprintf("An error occurred when attempting to remove the finalizer from user %s", userResource.Name), err) - } - } - return ctrl.Result{}, nil -} - -func (r *S3UserReconciler) getUserSecret(ctx context.Context, userResource *s3v1alpha1.S3User) (corev1.Secret, error) { - logger := log.FromContext(ctx) - - // Listing every secrets in the S3User's namespace, as a first step - // to get the actual secret matching the S3User proper. - // TODO : proper label matching ? - secretsList := &corev1.SecretList{} - userSecret := corev1.Secret{} - - err := r.List(ctx, secretsList, client.InNamespace(userResource.Namespace)) - if err != nil { - logger.Error(err, "An error occurred while listing the secrets in user's namespace") - return userSecret, fmt.Errorf("SecretListingFailed") - } - - if len(secretsList.Items) == 0 { - logger.Info("The user's namespace doesn't appear to contain any secret") - return userSecret, nil - } - // In all the secrets inside the S3User's namespace, one should have an owner reference - // pointing to the S3User. For that specific secret, we check if its name matches the one from - // the S3User, whether explicit (userResource.Spec.SecretName) or implicit (userResource.Name) - // In case of mismatch, that secret is deleted (and will be recreated) ; if there is a match, - // it will be used for state comparison. 
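Aside on the lookup described in the comment above (and its "proper label matching ?" TODO): since the effective secret name is deterministic, an alternative is to `Get` the secret by name and verify ownership with `metav1.IsControlledBy`, instead of listing every secret in the namespace. Note this only covers the common path; detecting an owned secret that still exists under an old name (the `S3UserSecretNameMismatch` case) would still need a List or a field index. A sketch under those assumptions:

```go
// Sketch: direct lookup of the expected secret plus an ownership check.
package sketch

import (
	"cmp"
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"

	s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1"
)

func getOwnedUserSecret(ctx context.Context, c client.Client, user *s3v1alpha1.S3User) (*corev1.Secret, error) {
	// Same name resolution as above: explicit spec.secretName, else the CR's name.
	name := cmp.Or(user.Spec.SecretName, user.Name)

	secret := &corev1.Secret{}
	if err := c.Get(ctx, types.NamespacedName{Namespace: user.Namespace, Name: name}, secret); err != nil {
		return nil, err // includes NotFound, which the caller can treat as "no secret yet"
	}
	if !metav1.IsControlledBy(secret, user) {
		return nil, nil // a secret with that name exists but is not owned by this S3User
	}
	return secret, nil
}
```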
- uid := userResource.GetUID() - - // cmp.Or takes the first non "zero" value, see https://pkg.go.dev/cmp#Or - effectiveS3UserSecretName := cmp.Or(userResource.Spec.SecretName, userResource.Name) - for _, secret := range secretsList.Items { - for _, ref := range secret.OwnerReferences { - if ref.UID == uid { - if secret.Name != effectiveS3UserSecretName { - return secret, fmt.Errorf("S3UserSecretNameMismatch") - } else { - userSecret = secret - break - } - } - } - } - - return userSecret, nil -} - -func (r *S3UserReconciler) deleteSecret(ctx context.Context, secret *corev1.Secret) { - logger := log.FromContext(ctx) - err := r.Delete(ctx, secret) - if err != nil { - logger.Error(err, "an error occurred while deleting a secret") - } -} - -// SetupWithManager sets up the controller with the Manager.* -func (r *S3UserReconciler) SetupWithManager(mgr ctrl.Manager) error { - // filterLogger := ctrl.Log.WithName("filterEvt") - return ctrl.NewControllerManagedBy(mgr). - For(&s3v1alpha1.S3User{}). - // The "secret owning" implies the reconcile loop will be called whenever a Secret owned - // by a S3User is created/updated/deleted. In other words, even when creating a single S3User, - // there is going to be several iterations. - Owns(&corev1.Secret{}). - // See : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ - WithEventFilter(predicate.Funcs{ - - // Ignore updates to CR status in which case metadata.Generation does not change, - // unless it is a change to the underlying Secret - UpdateFunc: func(e event.UpdateEvent) bool { - - // To check if the update event is tied to a change on secret, - // we try to cast e.ObjectNew to a secret (only if it's not a S3User, which - // should prevent any TypeAssertionError based panic). - secretUpdate := false - newUser, _ := e.ObjectNew.(*s3v1alpha1.S3User) - if newUser == nil { - secretUpdate = (e.ObjectNew.(*corev1.Secret) != nil) - } - - return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() || secretUpdate - }, - // Ignore create events caused by the underlying secret's creation - CreateFunc: func(e event.CreateEvent) bool { - user, _ := e.Object.(*s3v1alpha1.S3User) - return user != nil - }, - DeleteFunc: func(e event.DeleteEvent) bool { - // Evaluates to false if the object has been confirmed deleted. - return !e.DeleteStateUnknown - }, - }). - WithOptions(controller.Options{MaxConcurrentReconciles: 10}). - Complete(r) -} - -func (r *S3UserReconciler) setS3UserStatusConditionAndUpdate(ctx context.Context, userResource *s3v1alpha1.S3User, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) (ctrl.Result, error) { - logger := log.FromContext(ctx) - - // We moved away from meta.SetStatusCondition, as the implementation did not allow for updating - // lastTransitionTime if a Condition (as identified by Reason instead of Type) was previously - // obtained and updated to again. 
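Aside on the `UpdateFunc` in `SetupWithManager` above: the nil-check after a failed type assertion works, but the second, unchecked assertion to `*corev1.Secret` would panic if another watched type were ever added. A type switch expresses the same filtering more directly; sketch only, not part of this PR:

```go
// Sketch: the same update filtering as a type switch, avoiding the unchecked
// second type assertion used above.
package sketch

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/event"

	s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1"
)

func userUpdateEventFilter(e event.UpdateEvent) bool {
	switch e.ObjectNew.(type) {
	case *corev1.Secret:
		// A Secret owned by an S3User changed: always reconcile.
		return true
	case *s3v1alpha1.S3User:
		// For the CR itself, only reconcile on spec (generation) changes.
		return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration()
	default:
		return false
	}
}
```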
- userResource.Status.Conditions = utils.UpdateConditions(userResource.Status.Conditions, metav1.Condition{ - Type: conditionType, - Status: status, - Reason: reason, - LastTransitionTime: metav1.NewTime(time.Now()), - Message: message, - ObservedGeneration: userResource.GetGeneration(), - }) - - err := r.Status().Update(ctx, userResource) - if err != nil { - logger.Error(err, "an error occurred while updating the status of the S3User resource") - return ctrl.Result{}, utilerrors.NewAggregate([]error{err, srcError}) - } - return ctrl.Result{}, srcError -} - -func (r *S3UserReconciler) finalizeS3User(userResource *s3v1alpha1.S3User) error { - if r.S3UserDeletion { - return r.S3Client.DeleteUser(userResource.Spec.AccessKey) - } - return nil -} - -// newSecretForCR returns a secret with the same name/namespace as the CR. -// The secret will include all labels and annotations from the CR. -func (r *S3UserReconciler) newSecretForCR(ctx context.Context, userResource *s3v1alpha1.S3User, data map[string][]byte) (*corev1.Secret, error) { - logger := log.FromContext(ctx) - - // Reusing the S3User's labels and annotations - labels := map[string]string{} - for k, v := range userResource.ObjectMeta.Labels { - labels[k] = v - } - - annotations := map[string]string{} - for k, v := range userResource.ObjectMeta.Annotations { - annotations[k] = v - } - - secretName := cmp.Or(userResource.Spec.SecretName, userResource.Name) - secret := &corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: secretName, - Namespace: userResource.Namespace, - Labels: labels, - Annotations: annotations, - }, - Data: data, - Type: "Opaque", - } - - // Set S3User instance as the owner and controller - err := ctrl.SetControllerReference(userResource, secret, r.Scheme) - if err != nil { - logger.Error(err, "Could not set owner of kubernetes secret") - return nil, err - } - - return secret, nil - -} diff --git a/controllers/utils/utils.go b/controllers/utils/utils.go deleted file mode 100644 index 4f69b74..0000000 --- a/controllers/utils/utils.go +++ /dev/null @@ -1,29 +0,0 @@ -package utils - -import ( - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// func UpdateConditions(existingConditions []metav1.Condition, conditionType string, status metav1.ConditionStatus, reason string, message string, srcError error) []metav1.Condition { -func UpdateConditions(existingConditions []metav1.Condition, newCondition metav1.Condition) []metav1.Condition { - - // Comparing reason to existing conditions' reason. - // If a match is found, only the lastTransitionTime is updated - // If not, a new condition is added to the existing list - var hasMatch, matchingIndex = false, -1 - for i, condition := range existingConditions { - if condition.Reason == newCondition.Reason { - matchingIndex = i - hasMatch = true - } - } - if hasMatch { - existingConditions[matchingIndex].LastTransitionTime = metav1.NewTime(time.Now()) - existingConditions[matchingIndex].ObservedGeneration = newCondition.ObservedGeneration - return existingConditions - } - - return append([]metav1.Condition{newCondition}, existingConditions...) -} diff --git a/deploy/charts/s3-operator/.helmignore b/deploy/charts/s3-operator/.helmignore new file mode 100644 index 0000000..f82e96d --- /dev/null +++ b/deploy/charts/s3-operator/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/deploy/charts/s3-operator/Chart.yaml b/deploy/charts/s3-operator/Chart.yaml new file mode 100644 index 0000000..2fc22ab --- /dev/null +++ b/deploy/charts/s3-operator/Chart.yaml @@ -0,0 +1,21 @@ +apiVersion: v2 +name: s3-operator +description: A Helm chart for deploying an operator to manage S3 resources (eg buckets, policies) +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.6.0 +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "1.0.0" diff --git a/deploy/charts/s3-operator/README.md b/deploy/charts/s3-operator/README.md new file mode 100644 index 0000000..0f8b384 --- /dev/null +++ b/deploy/charts/s3-operator/README.md @@ -0,0 +1,32 @@ +# s3-operator + +![Version: 0.3.0](https://img.shields.io/badge/Version-0.3.0-informational?style=flat-square) ![Type: application](https://img.shields.io/badge/Type-application-informational?style=flat-square) ![AppVersion: 0.8.0](https://img.shields.io/badge/AppVersion-0.8.0-informational?style=flat-square) + +A Helm chart for deploying an operator to manage S3 resources (eg buckets, policies) + +## Values + +| Key | Type | Default | Description | +|-----|------|---------|-------------| +| controllerManager.manager.containerSecurityContext.allowPrivilegeEscalation | bool | `false` | | +| controllerManager.manager.containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | | +| controllerManager.manager.image.repository | string | `"inseefrlab/s3-operator"` | | +| controllerManager.manager.image.tag | string | `"latest"` | | +| controllerManager.manager.imagePullPolicy | string | `"IfNotPresent"` | | +| controllerManager.manager.imagePullSecrets | list | `[]` | | +| controllerManager.manager.resources.limits.cpu | string | `"1000m"` | | +| controllerManager.manager.resources.limits.memory | string | `"512Mi"` | | +| controllerManager.manager.resources.requests.cpu | string | `"50m"` | | +| controllerManager.manager.resources.requests.memory | string | `"64Mi"` | | +| controllerManager.replicas | int | `1` | | +| crds.install | bool | `true` | Install and upgrade CRDs | +| crds.keep | bool | `true` | Keep CRDs on chart uninstall | +| kubernetesClusterDomain | string | `"cluster.local"` | | +| s3.deletion.bucket | bool | `false` | | +| s3.deletion.path | bool | `false` | | +| s3.deletion.policy | bool | `false` | | +| s3.endpointUrl | string | `"localhost:9000"` | | +| 
s3.existingSecret | string | `"my-s3-operator-auth-secret"` | | + +---------------------------------------------- +Autogenerated from chart metadata using [helm-docs v1.11.0](https://github.com/norwoodj/helm-docs/releases/v1.11.0) diff --git a/deploy/charts/s3-operator/templates/_helpers.tpl b/deploy/charts/s3-operator/templates/_helpers.tpl new file mode 100644 index 0000000..0fba3c1 --- /dev/null +++ b/deploy/charts/s3-operator/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "s3-operator.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "s3-operator.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "s3-operator.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "s3-operator.labels" -}} +helm.sh/chart: {{ include "s3-operator.chart" . }} +{{ include "s3-operator.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "s3-operator.selectorLabels" -}} +app.kubernetes.io/name: {{ include "s3-operator.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "s3-operator.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "s3-operator.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/deploy/charts/s3-operator/templates/crds/buckets.yaml b/deploy/charts/s3-operator/templates/crds/buckets.yaml new file mode 100644 index 0000000..2b6b5a5 --- /dev/null +++ b/deploy/charts/s3-operator/templates/crds/buckets.yaml @@ -0,0 +1,157 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.crds.keep }} + helm.sh/resource-policy: keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.11.1 + labels: + {{- include "s3-operator.labels" . | nindent 4 }} + name: buckets.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: Bucket + listKind: BucketList + plural: buckets + singular: bucket + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Bucket is the Schema for the buckets API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: BucketSpec defines the desired state of Bucket + properties: + name: + description: Name of the bucket + type: string + paths: + description: Paths (folders) to create inside the bucket + items: + type: string + type: array + quota: + description: Quota to apply to the bucket + properties: + default: + description: Default quota to apply, mandatory + format: int64 + type: integer + override: + description: Optional override quota, to be used by cluster admin. + format: int64 + type: integer + required: + - default + type: object + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the bucket + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf + required: + - name + - quota + - s3InstanceRef + type: object + status: + description: BucketStatus defines the observed state of Bucket + properties: + conditions: + description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. 
The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/crds/paths.yaml b/deploy/charts/s3-operator/templates/crds/paths.yaml new file mode 100644 index 0000000..dbe93fc --- /dev/null +++ b/deploy/charts/s3-operator/templates/crds/paths.yaml @@ -0,0 +1,141 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.crds.keep }} + helm.sh/resource-policy: keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.11.1 + labels: + {{- include "s3-operator.labels" . | nindent 4 }} + name: paths.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: Path + listKind: PathList + plural: paths + singular: path + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Path is the Schema for the paths API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PathSpec defines the desired state of Path + properties: + bucketName: + description: Name of the bucket + type: string + paths: + description: Paths (folders) to create inside the bucket + items: + type: string + type: array + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the Paths + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf + required: + - bucketName + type: object + status: + description: PathStatus defines the observed state of Path + properties: + conditions: + description: 'Status management using Conditions. 
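The Bucket CRD above only constrains the schema; a custom resource exercising those fields might look like the sketch below. All names, namespaces and quota values are hypothetical, and `s3InstanceRef` keeps its documented `s3-operator/default` default.

```yaml
# Hypothetical Bucket resource matching the schema above.
apiVersion: s3.onyxia.sh/v1alpha1
kind: Bucket
metadata:
  name: example-bucket
  namespace: my-project
spec:
  name: example-bucket              # name of the bucket on the S3 backend
  paths:                            # folders created inside the bucket
    - data/raw
    - data/processed
  quota:
    default: 10000000000            # mandatory default quota (int64)
    # override: 20000000000         # optional override, intended for cluster admins
  s3InstanceRef: s3-operator/default   # immutable once set
```

Such a manifest would be applied with `kubectl apply -f` in a namespace that the referenced S3Instance allows.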
See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. 
The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/crds/policies.yaml b/deploy/charts/s3-operator/templates/crds/policies.yaml new file mode 100644 index 0000000..068390e --- /dev/null +++ b/deploy/charts/s3-operator/templates/crds/policies.yaml @@ -0,0 +1,140 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.crds.keep }} + helm.sh/resource-policy: keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.11.1 + labels: + {{- include "s3-operator.labels" . | nindent 4 }} + name: policies.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: Policy + listKind: PolicyList + plural: policies + singular: policy + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: Policy is the Schema for the policies API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: PolicySpec defines the desired state of Policy + properties: + name: + description: Name of the policy + type: string + policyContent: + description: Content of the policy (IAM JSON format) + type: string + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the Policy + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf + required: + - name + - policyContent + type: object + status: + description: PolicyStatus defines the observed state of Policy + properties: + conditions: + description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
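As with Bucket, a Path resource built from the spec above is essentially a bucket name plus a list of folders. The example below is purely illustrative (hypothetical names), again relying on the documented default for `s3InstanceRef`.

```yaml
# Hypothetical Path resource matching the schema above.
apiVersion: s3.onyxia.sh/v1alpha1
kind: Path
metadata:
  name: example-paths
  namespace: my-project
spec:
  bucketName: example-bucket        # bucket in which the folders are created
  paths:
    - archive/2024
    - archive/2025
  s3InstanceRef: s3-operator/default   # immutable once set
```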
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/crds/s3instances.yaml b/deploy/charts/s3-operator/templates/crds/s3instances.yaml new file mode 100644 index 0000000..fede9e9 --- /dev/null +++ b/deploy/charts/s3-operator/templates/crds/s3instances.yaml @@ -0,0 +1,180 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.crds.keep }} + helm.sh/resource-policy: keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.11.1 + labels: + {{- include "s3-operator.labels" . 
| nindent 4 }} + name: s3instances.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: S3Instance + listKind: S3InstanceList + plural: s3instances + singular: s3instance + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: S3Instance is the Schema for the S3Instances API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: S3InstanceSpec defines the desired state of S3Instance + properties: + allowedNamespaces: + description: AllowedNamespaces to use this S3InstanceUrl if empty + only the namespace of this instance url is allowed to use it + items: + type: string + type: array + bucketDeletionEnabled: + default: false + description: BucketDeletionEnabled Trigger bucket deletion on the + S3 backend upon CR deletion. Will fail if bucket is not empty. + type: boolean + caCertSecretRef: + description: Secret containing key ca.crt with the certificate associated + to the S3InstanceUrl + type: string + pathDeletionEnabled: + default: false + description: PathDeletionEnabled Trigger path deletion on the S3 backend + upon CR deletion. Limited to deleting the `.keep` files used by + the operator. + type: boolean + policyDeletionEnabled: + default: false + description: PolicyDeletionEnabled Trigger policy deletion on the + S3 backend upon CR deletion. + type: boolean + region: + description: region associated to the S3Instance + type: string + s3Provider: + default: minio + description: type of the S3Instance + enum: + - minio + - mockedS3Provider + type: string + x-kubernetes-validations: + - message: S3Provider is immutable + rule: self == oldSelf + s3UserDeletionEnabled: + default: false + description: S3UserDeletionEnabled Trigger S3 deletion on the S3 backend + upon CR deletion. + type: boolean + secretRef: + description: Ref to Secret associated to the S3Instance containing + accessKey and secretKey + type: string + url: + description: url of the S3Instance + type: string + required: + - bucketDeletionEnabled + - pathDeletionEnabled + - policyDeletionEnabled + - s3Provider + - s3UserDeletionEnabled + - secretRef + - url + type: object + status: + description: S3InstanceStatus defines the observed state of S3Instance + properties: + conditions: + description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. 
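For the Policy CRD shown earlier, `policyContent` carries the policy document itself as a string in IAM JSON format. A hypothetical resource (policy name, bucket and statement invented for illustration) could look like this:

```yaml
# Hypothetical Policy resource; the IAM statement is only an example.
apiVersion: s3.onyxia.sh/v1alpha1
kind: Policy
metadata:
  name: example-policy
  namespace: my-project
spec:
  name: example-policy
  s3InstanceRef: s3-operator/default
  policyContent: |
    {
      "Version": "2012-10-17",
      "Statement": [
        {
          "Effect": "Allow",
          "Action": ["s3:GetObject", "s3:PutObject", "s3:ListBucket"],
          "Resource": ["arn:aws:s3:::example-bucket", "arn:aws:s3:::example-bucket/*"]
        }
      ]
    }
```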
// Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/crds/s3users.yaml b/deploy/charts/s3-operator/templates/crds/s3users.yaml new file mode 100644 index 0000000..e85647d --- /dev/null +++ b/deploy/charts/s3-operator/templates/crds/s3users.yaml @@ -0,0 +1,144 @@ +{{- if .Values.crds.install }} +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + {{- if .Values.crds.keep }} + helm.sh/resource-policy: keep + {{- end }} + controller-gen.kubebuilder.io/version: v0.11.1 + labels: + {{- include "s3-operator.labels" . | nindent 4 }} + name: s3users.s3.onyxia.sh +spec: + group: s3.onyxia.sh + names: + kind: S3User + listKind: S3UserList + plural: s3users + singular: s3user + scope: Namespaced + versions: + - name: v1alpha1 + schema: + openAPIV3Schema: + description: S3User is the Schema for the S3Users API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. 
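The S3Instance CRD is the connection descriptor that the other resources point at through `s3InstanceRef`. A hypothetical instance matching the schema above is sketched below, together with the credentials Secret it references; endpoint, namespace names and credentials are placeholders. Note that the CRD does not pin the key names inside that Secret: the `secretRef` description mentions `accessKey` and `secretKey`, while the chart's optional default-instance Secret further down uses `S3_ACCESS_KEY`/`S3_SECRET_KEY`.

```yaml
# Hypothetical S3Instance and credentials Secret; all values are placeholders.
apiVersion: v1
kind: Secret
metadata:
  name: my-minio-credentials
  namespace: s3-operator
type: Opaque
stringData:                         # stringData avoids hand-encoding base64
  accessKey: my-access-key          # key names follow the secretRef field description
  secretKey: my-secret-key
---
apiVersion: s3.onyxia.sh/v1alpha1
kind: S3Instance
metadata:
  name: default                     # matches the "s3-operator/default" default used by the other CRDs
  namespace: s3-operator
spec:
  s3Provider: minio                 # immutable; "minio" or "mockedS3Provider"
  url: https://minio.example.com:9000
  secretRef: my-minio-credentials
  region: us-east-1
  allowedNamespaces:                # if empty, only this instance's own namespace may reference it
    - my-project
  bucketDeletionEnabled: false
  pathDeletionEnabled: false
  policyDeletionEnabled: false
  s3UserDeletionEnabled: false
```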
Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: S3UserSpec defines the desired state of S3User + properties: + accessKey: + description: Name of the S3User + type: string + policies: + description: Policies associated to the S3User + items: + type: string + type: array + s3InstanceRef: + default: s3-operator/default + description: s3InstanceRef where create the user + maxLength: 127 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(/[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)?$ + type: string + x-kubernetes-validations: + - message: s3InstanceRef is immutable + rule: self == oldSelf + secretName: + description: SecretName associated to the S3User + type: string + required: + - accessKey + type: object + status: + description: S3UserStatus defines the observed state of S3User + properties: + conditions: + description: 'Status management using Conditions. See also : https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#typical-status-properties' + items: + description: "Condition contains details for one aspect of the current + state of this API Resource. --- This struct is intended for direct + use as an array at the field path .status.conditions. For example, + \n type FooStatus struct{ // Represents the observations of a + foo's current state. // Known .status.conditions.type are: \"Available\", + \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge + // +listType=map // +listMapKey=type Conditions []metav1.Condition + `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" + protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition + transitioned from one status to another. This should be when + the underlying condition changed. If that is not known, then + using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: message is a human readable message indicating + details about the transition. This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: observedGeneration represents the .metadata.generation + that the condition was set based upon. For instance, if .metadata.generation + is currently 12, but the .status.conditions[x].observedGeneration + is 9, the condition is out of date with respect to the current + state of the instance. + format: int64 + minimum: 0 + type: integer + reason: + description: reason contains a programmatic identifier indicating + the reason for the condition's last transition. Producers + of specific condition types may define expected values and + meanings for this field, and whether the values are considered + a guaranteed API. The value should be a CamelCase string. + This field may not be empty. 
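An S3User ties an access key to a set of policies on a given instance, with `secretName` naming the Secret associated to the user. The sketch below uses hypothetical names and, like the other examples, the documented `s3InstanceRef` default.

```yaml
# Hypothetical S3User resource matching the schema above.
apiVersion: s3.onyxia.sh/v1alpha1
kind: S3User
metadata:
  name: example-user
  namespace: my-project
spec:
  accessKey: example-user
  policies:
    - example-policy                # policies associated to the user on the S3 backend
  s3InstanceRef: s3-operator/default
  secretName: example-user-credentials   # Secret associated to this S3User
```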
+ maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + --- Many .condition.type values are consistent across resources + like Available, but because arbitrary conditions can be useful + (see .node.status.conditions), the ability to deconflict is + important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + type: object + type: object + served: true + storage: true + subresources: + status: {} +{{- end }} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/default-s3instance.yaml b/deploy/charts/s3-operator/templates/default-s3instance.yaml new file mode 100644 index 0000000..90f3a16 --- /dev/null +++ b/deploy/charts/s3-operator/templates/default-s3instance.yaml @@ -0,0 +1,69 @@ +{{- if .Values.s3.default.enabled -}} +apiVersion: s3.onyxia.sh/v1alpha1 +kind: S3Instance +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + control-plane: controller-manager + {{- include "s3-operator.labels" . | nindent 4 }} + name: default +spec: + s3Provider: {{ .Values.s3.default.s3Provider }} + url: {{ .Values.s3.default.url }} + {{- if .Values.s3.default.secretRef }} + secretRef: {{ .Values.s3.default.secretRef }} + {{- else }} + secretRef: default-s3instance-credentials + {{- end }} + {{- if .Values.s3.default.caCertSecretRef }} + caCertSecretRef: {{ .Values.s3.default.caCertSecretRef }} + {{- else }} + caCertSecretRef: default-s3instance-certificates + {{- end }} + {{- if .Values.s3.default.allowedNamespaces }} + allowedNamespaces: {{ .Values.s3.default.allowedNamespaces }} + {{- end }} + {{- if .Values.s3.default.region }} + region: {{ .Values.s3.default.region }} + {{- end }} + s3UserDeletionEnabled: {{ .Values.s3.default.deletion.s3user }} + pathDeletionEnabled: {{ .Values.s3.default.deletion.path }} + policyDeletionEnabled: {{ .Values.s3.default.deletion.policy }} + bucketDeletionEnabled: {{ .Values.s3.default.deletion.bucket }} + +{{- if not .Values.s3.default.secretRef }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + control-plane: controller-manager + {{- include "s3-operator.labels" . | nindent 4 }} + name: default-s3instance-credentials +type: Opaque +data: + S3_ACCESS_KEY: {{- .Values.s3.default.accessKey }} + S3_SECRET_KEY: {{- .Values.s3.default.secretKey }} +{{- end }} +{{- if not .Values.s3.default.caCertSecretRef }} +--- +apiVersion: v1 +kind: Secret +metadata: + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + control-plane: controller-manager + {{- include "s3-operator.labels" . 
| nindent 4 }} + name: default-s3instance-certificates +type: Opaque +data: + ca.crt: {{- .Values.s3.default.caCertificatesBase64 }} +{{- end }} +{{- end -}} \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/deployment.yaml b/deploy/charts/s3-operator/templates/deployment.yaml new file mode 100644 index 0000000..b08d55b --- /dev/null +++ b/deploy/charts/s3-operator/templates/deployment.yaml @@ -0,0 +1,73 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "s3-operator.fullname" . }}-controller-manager + labels: + app.kubernetes.io/component: manager + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + control-plane: controller-manager + {{- include "s3-operator.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.controllerManager.replicas }} + selector: + matchLabels: + control-plane: controller-manager + {{- include "s3-operator.selectorLabels" . | nindent 6 }} + template: + metadata: + labels: + control-plane: controller-manager + {{- include "s3-operator.selectorLabels" . | nindent 8 }} + annotations: + kubectl.kubernetes.io/default-container: manager + spec: + {{- with .Values.controllerManager.manager.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + containers: + - args: + - --health-probe-bind-address=:8081 + - --metrics-bind-address=127.0.0.1:8080 + - --leader-elect + - --override-existing-secret={{ .Values.kubernetes.overrideExistingSecret }} + {{- if .Values.controllerManager.manager.extraArgs }} + {{- toYaml .Values.controllerManager.manager.extraArgs | nindent 8 }} + {{- end }} + command: + - /manager + env: + - name: KUBERNETES_CLUSTER_DOMAIN + value: {{ quote .Values.kubernetes.clusterDomain }} + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + {{- range $k, $v := .Values.controllerManager.manager.extraEnv }} + - name: {{ $k }} + value: {{ $v | quote }} + {{- end }} + image: {{ .Values.controllerManager.manager.image.repository }}:{{ .Values.controllerManager.manager.image.tag | default .Chart.AppVersion }} + imagePullPolicy: {{ .Values.controllerManager.manager.imagePullPolicy | default "IfNotPresent" | quote }} + livenessProbe: + httpGet: + path: /healthz + port: 8081 + initialDelaySeconds: 15 + periodSeconds: 20 + name: manager + readinessProbe: + httpGet: + path: /readyz + port: 8081 + initialDelaySeconds: 5 + periodSeconds: 10 + resources: {{- toYaml .Values.controllerManager.manager.resources | nindent 10 + }} + securityContext: {{- toYaml .Values.controllerManager.manager.containerSecurityContext + | nindent 10 }} + securityContext: + runAsNonRoot: true + serviceAccountName: {{ include "s3-operator.fullname" . }}-controller-manager + terminationGracePeriodSeconds: 10 diff --git a/deploy/charts/s3-operator/templates/leader-election-rbac.yaml b/deploy/charts/s3-operator/templates/leader-election-rbac.yaml new file mode 100644 index 0000000..b580324 --- /dev/null +++ b/deploy/charts/s3-operator/templates/leader-election-rbac.yaml @@ -0,0 +1,59 @@ +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "s3-operator.fullname" . }}-leader-election-role + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + {{- include "s3-operator.labels" . 
| nindent 4 }}
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - coordination.k8s.io
+  resources:
+  - leases
+  verbs:
+  - get
+  - list
+  - watch
+  - create
+  - update
+  - patch
+  - delete
+- apiGroups:
+  - ""
+  resources:
+  - events
+  verbs:
+  - create
+  - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ include "s3-operator.fullname" . }}-leader-election-rolebinding
+  labels:
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: s3-operator
+    app.kubernetes.io/part-of: s3-operator
+  {{- include "s3-operator.labels" . | nindent 4 }}
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: '{{ include "s3-operator.fullname" . }}-leader-election-role'
+subjects:
+- kind: ServiceAccount
+  name: '{{ include "s3-operator.fullname" . }}-controller-manager'
+  namespace: '{{ .Release.Namespace }}'
\ No newline at end of file
diff --git a/deploy/charts/s3-operator/templates/manager-rbac.yaml b/deploy/charts/s3-operator/templates/manager-rbac.yaml
new file mode 100644
index 0000000..d765ca3
--- /dev/null
+++ b/deploy/charts/s3-operator/templates/manager-rbac.yaml
@@ -0,0 +1,180 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: {{ include "s3-operator.fullname" . }}-manager-role
+  labels:
+    {{- include "s3-operator.labels" . | nindent 4 }}
+rules:
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - secrets/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - secrets/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - buckets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - buckets/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - buckets/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - paths
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - paths/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - paths/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - policies
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - policies/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - policies/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - s3instances
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - s3instances/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - s3instances/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - s3users
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - s3users/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - s3.onyxia.sh
+  resources:
+  - s3users/status
+  verbs:
+  - get
+  - patch
+  - update
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: {{ include "s3-operator.fullname" .
}}-manager-rolebinding + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + {{- include "s3-operator.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: '{{ include "s3-operator.fullname" . }}-manager-role' +subjects: +- kind: ServiceAccount + name: '{{ include "s3-operator.fullname" . }}-controller-manager' + namespace: '{{ .Release.Namespace }}' \ No newline at end of file diff --git a/deploy/charts/s3-operator/templates/serviceaccount.yaml b/deploy/charts/s3-operator/templates/serviceaccount.yaml new file mode 100644 index 0000000..2a62782 --- /dev/null +++ b/deploy/charts/s3-operator/templates/serviceaccount.yaml @@ -0,0 +1,9 @@ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "s3-operator.fullname" . }}-controller-manager + labels: + app.kubernetes.io/component: rbac + app.kubernetes.io/created-by: s3-operator + app.kubernetes.io/part-of: s3-operator + {{- include "s3-operator.labels" . | nindent 4 }} diff --git a/deploy/charts/s3-operator/values.yaml b/deploy/charts/s3-operator/values.yaml new file mode 100644 index 0000000..8643104 --- /dev/null +++ b/deploy/charts/s3-operator/values.yaml @@ -0,0 +1,51 @@ +crds: + # -- Install and upgrade CRDs + install: true + # -- Keep CRDs on chart uninstall + keep: true + +controllerManager: + manager: + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + image: + repository: inseefrlab/s3-operator + tag: latest + imagePullPolicy: IfNotPresent + imagePullSecrets: [] + resources: + limits: + cpu: 1000m + memory: 512Mi + requests: + cpu: 50m + memory: 64Mi + extraArgs: [] + extraEnv: {} + replicas: 1 + +kubernetes: + clusterDomain: cluster.local + overrideExistingSecret: false + +s3: + default: + enabled: false + s3Provider: minio + url: "https://localhost:9000" + accessKey: "accessKey" + secretKey: "secretKey" + caCertificatesBase64: base64encodedPEMFormatCACertificate + region: us-east-1 + # secretRef: "my-s3-operator-auth-secret" + # caCertSecretRef: "my-s3-operator-cert-secret" + # allowedNamespaces: "" + # Should the operator try to delete the resource from the S3 backend upon CR deletion ? 
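Beyond these defaults, the chart is typically installed with an override file, e.g. `helm install s3-operator deploy/charts/s3-operator -f my-values.yaml`. The sketch below is a hypothetical `my-values.yaml` that pins an image tag and enables the default S3Instance against pre-created `secretRef`/`caCertSecretRef` Secrets instead of the inline `accessKey`/`secretKey`/`caCertificatesBase64` values; if the inline values are used, note that the template renders them directly into a Secret's `data:` section, so they must already be base64-encoded. Endpoint, tag and secret names are placeholders.

```yaml
# Hypothetical my-values.yaml override; endpoint, tag and secret names are placeholders.
controllerManager:
  manager:
    image:
      tag: v0.10.0                            # hypothetical released tag, instead of "latest"
s3:
  default:
    enabled: true
    s3Provider: minio
    url: "https://minio.example.com:9000"
    secretRef: my-s3-operator-auth-secret     # pre-created Secret with the instance credentials
    caCertSecretRef: my-s3-operator-cert-secret
    region: us-east-1
    deletion:
      bucket: false
      path: false
      policy: false
      s3user: false
```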
+ deletion: + bucket: false + path: false + policy: false + s3user: false diff --git a/docs/.gitkeep b/docs/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/go.mod b/go.mod index e781b36..70381ce 100644 --- a/go.mod +++ b/go.mod @@ -1,98 +1,103 @@ module github.com/InseeFrLab/s3-operator -go 1.22 +go 1.23.0 require ( - github.com/minio/madmin-go/v3 v3.0.34 - github.com/minio/minio-go/v7 v7.0.64 - github.com/onsi/ginkgo/v2 v2.11.0 - github.com/onsi/gomega v1.27.10 - go.uber.org/zap v1.25.0 - k8s.io/api v0.28.3 - k8s.io/apimachinery v0.28.3 - k8s.io/client-go v0.28.3 - sigs.k8s.io/controller-runtime v0.16.3 + github.com/minio/madmin-go/v3 v3.0.90 + github.com/minio/minio-go/v7 v7.0.84 + github.com/onsi/ginkgo/v2 v2.22.2 + github.com/onsi/gomega v1.36.2 + github.com/stretchr/testify v1.9.0 + go.uber.org/zap v1.27.0 + k8s.io/api v0.32.0 + k8s.io/apimachinery v0.32.0 + k8s.io/client-go v0.32.0 + sigs.k8s.io/controller-runtime v0.20.1 +) + +require ( + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-ini/ini v1.67.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/goccy/go-json v0.10.4 // indirect + github.com/prometheus/prometheus v0.54.1 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/x448/float16 v0.8.4 // indirect ) require ( github.com/beorn7/perks v1.0.1 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/dustin/go-humanize v1.0.1 // indirect - github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.2.4 // indirect - github.com/go-logr/zapr v1.2.4 // indirect - github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/zapr v1.3.0 // indirect + github.com/go-ole/go-ole v1.3.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect - github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-cmp v0.6.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/imdario/mergo v0.3.6 // indirect + github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect + github.com/google/uuid v1.6.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress 
v1.16.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect - github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de // indirect + github.com/klauspost/compress v1.17.11 // indirect + github.com/klauspost/cpuid/v2 v2.2.9 // indirect + github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/minio/md5-simd v1.1.2 // indirect - github.com/minio/sha256-simd v1.0.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/philhofer/fwd v1.1.2 // indirect + github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect - github.com/prometheus/client_golang v1.16.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.44.0 // indirect - github.com/prometheus/procfs v0.10.1 // indirect - github.com/prometheus/prom2json v1.3.3 // indirect - github.com/rs/xid v1.5.0 // indirect - github.com/safchain/ethtool v0.3.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect + github.com/prometheus/client_golang v1.20.4 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.60.0 // indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/prometheus/prom2json v1.4.1 // indirect + github.com/rs/xid v1.6.0 // indirect + github.com/safchain/ethtool v0.4.1 // indirect github.com/secure-io/sio-go v0.3.1 // indirect - github.com/shirou/gopsutil/v3 v3.23.1 // indirect - github.com/sirupsen/logrus v1.9.3 // indirect + github.com/shirou/gopsutil/v3 v3.24.5 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/tinylib/msgp v1.1.8 // indirect - github.com/tklauser/go-sysconf v0.3.11 // indirect - github.com/tklauser/numcpus v0.6.0 // indirect - github.com/yusufpapurcu/wmi v1.2.2 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/tinylib/msgp v1.2.5 // indirect + github.com/tklauser/go-sysconf v0.3.14 // indirect + github.com/tklauser/numcpus v0.8.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e // indirect - golang.org/x/net v0.17.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect - golang.org/x/sync v0.2.0 // indirect - golang.org/x/sys v0.13.0 // indirect - golang.org/x/term v0.13.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.9.3 // indirect + golang.org/x/crypto v0.31.0 // indirect + golang.org/x/net v0.33.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.7.0 // indirect + golang.org/x/tools v0.28.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/protobuf v1.36.1 // 
indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.28.3 // indirect - k8s.io/component-base v0.28.3 // indirect - k8s.io/klog/v2 v2.100.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + k8s.io/apiextensions-apiserver v0.32.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 9790a39..c2f0df3 100644 --- a/go.sum +++ b/go.sum @@ -1,104 +1,92 @@ -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= -github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= github.com/evanphx/json-patch v5.6.0+incompatible/go.mod 
h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/zapr v1.2.4 h1:QHVo+6stLbfJmYGkQ7uGHUCu5hnAFAj6mDe6Ea0SeOo= -github.com/go-logr/zapr v1.2.4/go.mod h1:FyHWQIzQORZ0QVE1BtVHv3cKtNLuXsbNLtpuhNapBOA= -github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A= +github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE= +github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= 
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/goccy/go-json v0.10.4 h1:JSwxQzIqKfmFX1swYPpUThQZp/Ka4wzJdK0LWVytLPM= +github.com/goccy/go-json v0.10.4/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= 
+github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= +github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= -github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de h1:V53FWzU6KAZVi1tPp5UIsMoUWJ2/PNwYIDXnu7QuBCE= -github.com/lufia/plan9stats v0.0.0-20230110061619-bbe2e5e100de/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 h1:7UMa6KCCMjZEMDtTVdcGu0B1GmmC7QJKiCCjyTAWQy0= +github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/minio/madmin-go/v3 v3.0.34 h1:MGPQYIWm52liSubofK24FhrznPYnRpQrDNddZJEyBPA= -github.com/minio/madmin-go/v3 v3.0.34/go.mod 
h1:4QN2NftLSV7MdlT50dkrenOMmNVHluxTvlqJou3hte8= +github.com/minio/madmin-go/v3 v3.0.90 h1:Lz6a6eT1h5QT54fkbsEJ0xcWuvBjE1IaNgxfkxe6Qxs= +github.com/minio/madmin-go/v3 v3.0.90/go.mod h1:pMLdj9OtN0CANNs5tdm6opvOlDFfj0WhbztboZAjRWE= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.64 h1:Zdza8HwOzkld0ZG/og50w56fKi6AAyfqfifmasD9n2Q= -github.com/minio/minio-go/v7 v7.0.64/go.mod h1:R4WVUR6ZTedlCcGwZRauLMIKjgyaWxhs4Mqi/OMPmEc= -github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= -github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= +github.com/minio/minio-go/v7 v7.0.84 h1:D1HVmAF8JF8Bpi6IU4V9vIEj+8pc+xU88EWMs2yed0E= +github.com/minio/minio-go/v7 v7.0.84/go.mod h1:57YXpvc5l3rjPdhqNrDsvVlY0qPI6UTk1bflAe+9doY= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -106,211 +94,153 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= -github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= -github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= -github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= -github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU= +github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk= +github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= +github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c h1:dAMKvw0MlJT1GshSTtih8C2gDs04w8dReiOGXrGLNoY= +github.com/philhofer/fwd v1.1.3-0.20240916144458-20a13a1f6b7c/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/power-devops/perfstat 
v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= -github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= -github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= -github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= -github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= -github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= -github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= -github.com/prometheus/prom2json v1.3.3 h1:IYfSMiZ7sSOfliBoo89PcufjWO4eAR0gznGcETyaUgo= -github.com/prometheus/prom2json v1.3.3/go.mod h1:Pv4yIPktEkK7btWsrUTWDDDrnpUrAELaOCj+oFwlgmc= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= -github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= -github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= -github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0= -github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= +github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v1.20.4 h1:Tgh3Yr67PaOv/uTqloMsCEdeuFTatm5zIq5+qNN23vI= +github.com/prometheus/client_golang v1.20.4/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.60.0 h1:+V9PAREWNvJMAuJ1x1BaWl9dewMW4YrHZQbx0sJNllA= +github.com/prometheus/common v0.60.0/go.mod h1:h0LYf1R1deLSKtD4Vdg8gy4RuOvENW2J/h19V5NADQw= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/prom2json v1.4.1 h1:7McxdrHgPEOtMwWjkKtd0v5AhpR2Q6QAnlHKVxq0+tQ= +github.com/prometheus/prom2json v1.4.1/go.mod h1:CzOQykSKFxXuC7ELUZHOHQvwKesQ3eN0p2PWLhFitQM= +github.com/prometheus/prometheus v0.54.1 h1:vKuwQNjnYN2/mDoWfHXDhAsz/68q/dQDb+YbcEqU7MQ= +github.com/prometheus/prometheus v0.54.1/go.mod h1:xlLByHhk2g3ycakQGrMaU8K7OySZx98BzeCR99991NY= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/safchain/ethtool v0.4.1 h1:S6mEleTADqgynileXoiapt/nKnatyR6bmIHoF+h2ADo= +github.com/safchain/ethtool v0.4.1/go.mod h1:XLLnZmy4OCRTkksP/UiMjij96YmIsBfmBQcs7H6tA48= github.com/secure-io/sio-go v0.3.1 
h1:dNvY9awjabXTYGsTF1PiCySl9Ltofk9GA3VdWlo7rRc= github.com/secure-io/sio-go v0.3.1/go.mod h1:+xbkjDzPjwh4Axd07pRKSNriS9SCiYksWnZqdnfpQxs= -github.com/shirou/gopsutil/v3 v3.23.1 h1:a9KKO+kGLKEvcPIs4W62v0nu3sciVDOOOPUD0Hz7z/4= -github.com/shirou/gopsutil/v3 v3.23.1/go.mod h1:NN6mnm5/0k8jw4cBfCnJtr5L7ErOTg18tMNpgFkn0hA= -github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= -github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/shirou/gopsutil/v3 v3.24.5 h1:i0t8kL+kQTvpAYToeuiVk3TgDeKOFioZO3Ztz/iZ9pI= +github.com/shirou/gopsutil/v3 v3.24.5/go.mod h1:bsoOS1aStSs9ErQ1WWfxllSeS1K5D+U30r2NfcubMVk= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= -github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= -github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= -github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= -github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/tinylib/msgp v1.2.5 h1:WeQg1whrXRFiZusidTQqzETkRpGjFjcIhW6uqWH09po= +github.com/tinylib/msgp v1.2.5/go.mod h1:ykjzy2wzgrlvpDCRc4LA8UXy6D8bzMSuAF3WD57Gok0= +github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= +github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= +github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= +github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE= 
+github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= -github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= -go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc= -golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA= -golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0/go.mod 
h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= -golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= -golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= -golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= +golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.4.0/go.mod h1:UE5sM2OK9E/d67R0ANs2xJizIymRP5gJU295PvKXxjQ= -golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM= -golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= -google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= +google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM= -k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc= -k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08= -k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc= -k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A= -k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8= -k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4= -k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo= -k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI= -k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= -sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= +k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= +k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= +k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= +k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= +k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= +k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils 
v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.20.1 h1:JbGMAG/X94NeM3xvjenVUaBjy6Ui4Ogd/J5ZtjZnHaE= +sigs.k8s.io/controller-runtime v0.20.1/go.mod h1:BrP3w158MwvB3ZbNpaAcIKkHQ7YGpYnzpoSTZ8E14WU= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= +sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 65b8622..2cc45a7 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,15 +1,15 @@ -/* -Copyright 2023. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. */ \ No newline at end of file diff --git a/internal/controller/bucket/constants.go b/internal/controller/bucket/constants.go new file mode 100644 index 0000000..4feb057 --- /dev/null +++ b/internal/controller/bucket/constants.go @@ -0,0 +1,19 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bucket_controller + +const bucketFinalizer = "s3.onyxia.sh/finalizer" diff --git a/internal/controller/bucket/controller.go b/internal/controller/bucket/controller.go new file mode 100644 index 0000000..593f4e9 --- /dev/null +++ b/internal/controller/bucket/controller.go @@ -0,0 +1,65 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bucket_controller + +import ( + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=buckets/finalizers,verbs=update + +// BucketReconciler reconciles a Bucket object +type BucketReconciler struct { + client.Client + Scheme *runtime.Scheme + ReconcilePeriod time.Duration + S3factory s3factory.S3Factory + ControllerHelper *helpers.ControllerHelper + S3Instancehelper *helpers.S3InstanceHelper +} + +// SetupWithManager sets up the controller with the Manager.* +func (r *BucketReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.Bucket{}). + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Only reconcile if generation has changed + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} diff --git a/internal/controller/bucket/finalizer.go b/internal/controller/bucket/finalizer.go new file mode 100644 index 0000000..3517ac1 --- /dev/null +++ b/internal/controller/bucket/finalizer.go @@ -0,0 +1,112 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bucket_controller + +import ( + "context" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +func (r *BucketReconciler) handleDeletion( + ctx context.Context, + req reconcile.Request, + bucketResource *s3v1alpha1.Bucket, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + if controllerutil.ContainsFinalizer(bucketResource, bucketFinalizer) { + + if err := r.finalizeBucket(ctx, bucketResource); err != nil { + logger.Error( + err, + "An error occurred while attempting to finalize the bucket", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.DeletionFailure, + "Bucket deletion has failed", + err, + ) + } + + if ok := controllerutil.RemoveFinalizer(bucketResource, bucketFinalizer); !ok { + logger.Info( + "Failed to remove finalizer for bucketResource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + // Let's re-fetch the S3Instance Custom Resource after removing the finalizer + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Update(ctx, bucketResource); err != nil { + logger.Error( + err, + "Failed to remove finalizer for bucketResource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + } + return ctrl.Result{}, nil +} + +func (r *BucketReconciler) finalizeBucket( + ctx context.Context, + bucketResource *s3v1alpha1.Bucket, +) error { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + bucketResource.Name, + bucketResource.Namespace, + bucketResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return err + } + if s3Client.GetConfig().BucketDeletionEnabled { + return s3Client.DeleteBucket(bucketResource.Spec.Name) + } + return nil +} diff --git a/internal/controller/bucket/finalizer_test.go b/internal/controller/bucket/finalizer_test.go new file mode 100644 index 0000000..5ccfb22 --- /dev/null +++ b/internal/controller/bucket/finalizer_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bucket_controller_test + +import ( + "context" + "testing" + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + bucket_controller "github.com/InseeFrLab/s3-operator/internal/controller/bucket" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleDelete(t *testing.T) { + + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + bucketResource := &s3v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-bucket", + Namespace: "default", + Generation: 1, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.BucketSpec{ + Name: "example-bucket", + S3InstanceRef: "s3-operator/default", + Quota: s3v1alpha1.Quota{Default: 10}, + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, bucketResource}) + + // Create the reconciler + reconciler := &bucket_controller.BucketReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: bucketResource.Name, Namespace: bucketResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("ressource have been deleted", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: bucketResource.Name, Namespace: bucketResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + policy := &s3v1alpha1.Bucket{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "example-bucket", + }, policy) + assert.NotNil(t, err) + assert.ErrorContains(t, err, "buckets.s3.onyxia.sh \"example-bucket\" not found") + }) + +} diff --git a/internal/controller/bucket/reconcile.go b/internal/controller/bucket/reconcile.go new file mode 100644 index 0000000..75e140c --- /dev/null +++ b/internal/controller/bucket/reconcile.go @@ -0,0 +1,488 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package bucket_controller + +import ( + "context" + "fmt" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *BucketReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Checking for bucket resource existence + bucketResource := &s3v1alpha1.Bucket{} + err := r.Get(ctx, req.NamespacedName, bucketResource) + if err != nil { + if k8sapierrors.IsNotFound(err) { + logger.Info( + "The Bucket custom resource has been removed ; as such the Bucket controller is NOOP.", + "req.Name", + req.Name, + ) + return ctrl.Result{}, nil + } + logger.Error( + err, + "An error occurred when attempting to read the Bucket resource from the Kubernetes cluster", + ) + return ctrl.Result{}, err + } + + // Let's just set the status as Unknown when no status are available + if len(bucketResource.Status.Conditions) == 0 { + meta.SetStatusCondition( + &bucketResource.Status.Conditions, + metav1.Condition{ + Type: s3v1alpha1.ConditionReconciled, + Status: metav1.ConditionUnknown, + ObservedGeneration: bucketResource.Generation, + Reason: s3v1alpha1.Reconciling, + Message: "Starting reconciliation", + }, + ) + if err = r.Status().Update(ctx, bucketResource); err != nil { + logger.Error( + err, + "Failed to update bucketRessource status", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the bucketResource Custom Resource after update the status + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, bucketResource); err != nil { + logger.Error( + err, + "Failed to re-fetch bucketResource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(bucketResource, bucketFinalizer) { + logger.Info("Adding finalizer to bucket resource", "bucketName", + bucketResource.Spec.Name, "NamespacedName", req.NamespacedName.String()) + if ok := controllerutil.AddFinalizer(bucketResource, bucketFinalizer); !ok { + logger.Error( + err, + "Failed to add finalizer into bucket resource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + if err := r.Update(ctx, bucketResource); err != nil { + logger.Error( + err, + "An error occurred when adding finalizer on bucketResource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return 
ctrl.Result{}, err + } + + if err := r.Get(ctx, req.NamespacedName, bucketResource); err != nil { + logger.Error( + err, + "Failed to re-fetch bucketResource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // // Managing bucket deletion with a finalizer + // // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources + if bucketResource.GetDeletionTimestamp() != nil { + logger.Info("bucketResource have been marked for deletion", "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.handleDeletion(ctx, req, bucketResource) + } + + return r.handleReconciliation(ctx, req, bucketResource) + +} + +func (r *BucketReconciler) handleReconciliation( + ctx context.Context, + req reconcile.Request, + bucketResource *s3v1alpha1.Bucket, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + bucketResource.Name, + bucketResource.Namespace, + bucketResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // Bucket lifecycle management (other than deletion) starts here + // Check bucket existence on the S3 server + found, err := s3Client.BucketExists(bucketResource.Spec.Name) + if err != nil { + logger.Error( + err, + "An error occurred while checking the existence of a bucket", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + "Error while checking if bucket already exist", + err, + ) + + } + + // If the bucket does not exist, it is created based on the CR (with potential quotas and paths) + if !found { + return r.handleCreation(ctx, req, bucketResource) + } + + return r.handleUpdate(ctx, req, bucketResource) + +} + +func (r *BucketReconciler) handleUpdate( + ctx context.Context, + req reconcile.Request, + bucketResource *s3v1alpha1.Bucket, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + bucketResource.Name, + bucketResource.Namespace, + bucketResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error( + err, + "An error occurred while getting s3Client for bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // If the bucket exists on the S3 server, then we need to compare it to + // its corresponding custom resource, and update it in case the CR has changed. 
+ + // Checking effectiveQuota existence on the bucket + effectiveQuota, err := s3Client.GetQuota(bucketResource.Spec.Name) + if err != nil { + logger.Error( + err, + "An error occurred while checking the quota for bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + "Checking quota has failed", + err, + ) + } + + // If a quota exists, we check it versus the spec of the CR. In case they don't match, + // we reset the quota using the value from CR ("override" is present, "default" if not) + + // Choosing between override / default + quotaToResetTo := bucketResource.Spec.Quota.Override + if quotaToResetTo == 0 { + quotaToResetTo = bucketResource.Spec.Quota.Default + } + + if effectiveQuota != quotaToResetTo { + err = s3Client.SetQuota(bucketResource.Spec.Name, quotaToResetTo) + if err != nil { + logger.Error( + err, + "An error occurred while resetting the quota for bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "The quota update (%v => %v) has failed", + effectiveQuota, + quotaToResetTo, + ), + err, + ) + } + } + + // For every path on the custom resource's spec, we check the path actually + // exists on the bucket on the S3 server, and create it if it doesn't + // TODO ? : the way this is naively implemented, it's probably costly. Maybe + // we can get the "effectiveBucket" (with its quota and paths) once at the beginning, + // and iterate on this instead of interrogating the S3 server twice for every path. + // But then again, some buckets will likely be filled with many objects outside the + // scope of the CR, so getting all of them might be even more costly. 
+ for _, pathInCr := range bucketResource.Spec.Paths { + pathExists, err := s3Client.PathExists(bucketResource.Spec.Name, pathInCr) + if err != nil { + logger.Error( + err, + "An error occurred while checking a path's existence for bucket ressource", + "path", + pathInCr, + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + fmt.Sprintf("The check for path [%s] in bucket has failed", pathInCr), + err, + ) + } + + if !pathExists { + err = s3Client.CreatePath(bucketResource.Spec.Name, pathInCr) + if err != nil { + logger.Error( + err, + "An error occurred while creating a path for bucket ressource", + "path", + pathInCr, + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + fmt.Sprintf("The creation of path [%s] in bucket has failed", pathInCr), + err, + ) + } + } + } + + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Reconciled, + "Bucket reconciled", + nil, + ) +} + +func (r *BucketReconciler) handleCreation( + ctx context.Context, + req reconcile.Request, + bucketResource *s3v1alpha1.Bucket, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + bucketResource.Name, + bucketResource.Namespace, + bucketResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error( + err, + "An error occurred while getting s3Client for bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // Bucket creation + err = s3Client.CreateBucket(bucketResource.Spec.Name) + if err != nil { + logger.Error( + err, + "An error occurred while creating bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.CreationFailure, + "An error occurred while creating bucket", + err, + ) + } + + // Setting quotas + err = s3Client.SetQuota(bucketResource.Spec.Name, bucketResource.Spec.Quota.Default) + if err != nil { + logger.Error( + err, + "An error occurred while setting quota for bucket ressource", + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "Setting a quota of [%v] on bucket [%s] has failed", + bucketResource.Spec.Quota.Default, + bucketResource.Spec.Name, + ), + err, + ) + } + + // Path creation + for _, pathInCr := range bucketResource.Spec.Paths { + err = s3Client.CreatePath(bucketResource.Spec.Name, pathInCr) + if err != nil { + logger.Error( + err, + "An error occurred while creating path for bucket ressource", + "path", + pathInCr, + "bucketName", + bucketResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + s3v1alpha1.Unreachable, + fmt.Sprintf("Creation for path [%s] in bucket has failed", pathInCr), + err, + ) + } + } + + return r.SetReconciledCondition( + ctx, + req, + bucketResource, + 
s3v1alpha1.Reconciled, + "Bucket reconciled", + nil, + ) +} diff --git a/internal/controller/bucket/reconcile_test.go b/internal/controller/bucket/reconcile_test.go new file mode 100644 index 0000000..154b0f2 --- /dev/null +++ b/internal/controller/bucket/reconcile_test.go @@ -0,0 +1,140 @@ +/* +Copyright 2024 Mathieu Parent . + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bucket_controller_test + +import ( + "context" + "testing" + + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + bucket_controller "github.com/InseeFrLab/s3-operator/internal/controller/bucket" + "github.com/stretchr/testify/assert" +) + +func TestHandleCreate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + bucketResource := &s3v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-bucket", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.BucketSpec{ + Name: "test-bucket", + S3InstanceRef: "s3-operator/default", + Quota: s3v1alpha1.Quota{Default: 10}, + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "s3.onyxia.sh/v1alpha1", + Kind: "Bucket", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, bucketResource}) + + // Create the reconciler + reconciler := &bucket_controller.BucketReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: bucketResource.Name, Namespace: bucketResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} + +func TestHandleUpdate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + bucketResource := &s3v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-bucket", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.BucketSpec{ + Name: "existing-bucket", + Paths: []string{"example"}, + S3InstanceRef: "s3-operator/default", + Quota: s3v1alpha1.Quota{Default: 10}, + }, + } + + // Create a fake client with a sample CR + bucketInvalidResource := &s3v1alpha1.Bucket{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-invalid-bucket", + Namespace: "default", + Generation: 1, + Finalizers: 
[]string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.BucketSpec{ + Name: "existing-invalid-bucket", + Paths: []string{"example", "non-existing"}, + S3InstanceRef: "s3-operator/default", + Quota: s3v1alpha1.Quota{Default: 100}}, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, bucketResource, bucketInvalidResource}) + + // Create the reconciler + reconciler := &bucket_controller.BucketReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: bucketResource.Name, Namespace: bucketResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: bucketInvalidResource.Name, Namespace: bucketInvalidResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} diff --git a/internal/controller/bucket/status.go b/internal/controller/bucket/status.go new file mode 100644 index 0000000..e4ff414 --- /dev/null +++ b/internal/controller/bucket/status.go @@ -0,0 +1,47 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bucket_controller + +import ( + "context" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + + ctrl "sigs.k8s.io/controller-runtime" +) + +func (r *BucketReconciler) SetReconciledCondition( + ctx context.Context, + req ctrl.Request, + bucketResource *s3v1alpha1.Bucket, + reason string, + message string, + err error, +) (ctrl.Result, error) { + return r.ControllerHelper.SetReconciledCondition( + ctx, + r.Status(), + req, + bucketResource, + &bucketResource.Status.Conditions, + s3v1alpha1.ConditionReconciled, + reason, + message, + err, + r.ReconcilePeriod, + ) +} diff --git a/internal/controller/path/constants.go b/internal/controller/path/constants.go new file mode 100644 index 0000000..7bce285 --- /dev/null +++ b/internal/controller/path/constants.go @@ -0,0 +1,19 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package path_controller + +const pathFinalizer = "s3.onyxia.sh/finalizer" diff --git a/internal/controller/path/controller.go b/internal/controller/path/controller.go new file mode 100644 index 0000000..8c80136 --- /dev/null +++ b/internal/controller/path/controller.go @@ -0,0 +1,65 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path_controller + +import ( + "time" + + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=paths/finalizers,verbs=update + +// PathReconciler reconciles a Path object +type PathReconciler struct { + client.Client + Scheme *runtime.Scheme + ReconcilePeriod time.Duration + S3factory s3factory.S3Factory + ControllerHelper *helpers.ControllerHelper + S3Instancehelper *helpers.S3InstanceHelper +} + +// SetupWithManager sets up the controller with the Manager. +func (r *PathReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.Path{}). + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Only reconcile if generation has changed + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} diff --git a/internal/controller/path/finalizer.go b/internal/controller/path/finalizer.go new file mode 100644 index 0000000..1f60c84 --- /dev/null +++ b/internal/controller/path/finalizer.go @@ -0,0 +1,142 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package path_controller + +import ( + "context" + "fmt" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *PathReconciler) handleDeletion( + ctx context.Context, + req reconcile.Request, + pathResource *s3v1alpha1.Path, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + if controllerutil.ContainsFinalizer(pathResource, pathFinalizer) { + if err := r.finalizePath(ctx, pathResource); err != nil { + logger.Error( + err, + "An error occurred when attempting to finalize the path", + "path", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.DeletionFailure, + "Path deletion has failed", + err, + ) + } + + // Remove pathFinalizer. Once all finalizers have been + // removed, the object will be deleted. + + if ok := controllerutil.RemoveFinalizer(pathResource, pathFinalizer); !ok { + logger.Info( + "Failed to remove finalizer for pathResource", + "pathResource", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + // Let's re-fetch the S3Instance Custom Resource after removing the finalizer + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Update(ctx, pathResource); err != nil { + logger.Error( + err, + "An error occurred when removing finalizer from pathResource", + "pathResource", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + } + return ctrl.Result{}, nil +} + +func (r *PathReconciler) finalizePath(ctx context.Context, pathResource *s3v1alpha1.Path) error { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + pathResource.Name, + pathResource.Namespace, + pathResource.Spec.S3InstanceRef, + ) + + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return err + } + + if s3Client.GetConfig().PathDeletionEnabled { + var failedPaths []string = make([]string, 0) + for _, path := range pathResource.Spec.Paths { + + pathExists, err := s3Client.PathExists(pathResource.Spec.BucketName, path) + if err != nil { + logger.Error( + err, + "finalize : an error occurred while checking a path's existence on a bucket", + "bucket", + pathResource.Spec.BucketName, + "path", + path, + ) + } + + if pathExists { + err = s3Client.DeletePath(pathResource.Spec.BucketName, path) + if err != nil { + failedPaths = append(failedPaths, path) + } + } + } + + if len(failedPaths) > 0 { + return fmt.Errorf( + "at least one path couldn't be removed from S3 backend %+q", + failedPaths, + ) + } + } + return nil +} diff --git a/internal/controller/path/finalizer_test.go b/internal/controller/path/finalizer_test.go new file mode 100644 index 0000000..6074cf3 --- /dev/null +++ b/internal/controller/path/finalizer_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path_controller_test + +import ( + "context" + "testing" + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + path_controller "github.com/InseeFrLab/s3-operator/internal/controller/path" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleDelete(t *testing.T) { + + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + pathResource := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-path", + Namespace: "default", + Generation: 1, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.PathSpec{ + S3InstanceRef: "s3-operator/default", + BucketName: "my-bucket", + Paths: []string{"path1"}, + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, pathResource}) + + // Create the reconciler + reconciler := &path_controller.PathReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathResource.Name, Namespace: pathResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("ressource have been deleted", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathResource.Name, Namespace: pathResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + path := &s3v1alpha1.Path{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "example-path", + }, path) + assert.NotNil(t, err) + assert.ErrorContains(t, err, "paths.s3.onyxia.sh \"example-path\" not found") + }) + +} diff --git a/internal/controller/path/reconcile.go b/internal/controller/path/reconcile.go new file mode 100644 index 0000000..e082e5f --- /dev/null +++ b/internal/controller/path/reconcile.go @@ -0,0 +1,292 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path_controller + +import ( + "context" + "fmt" + + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *PathReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Checking for path resource existence + pathResource := &s3v1alpha1.Path{} + err := r.Get(ctx, req.NamespacedName, pathResource) + if err != nil { + if k8sapierrors.IsNotFound(err) { + logger.Info( + "The Path custom resource has been removed ; as such the Path controller is NOOP.", + "req.Name", + req.Name, + ) + return ctrl.Result{}, nil + } + logger.Error( + err, + "An error occurred when attempting to read the Path resource from the Kubernetes cluster", + ) + return ctrl.Result{}, err + } + + // Let's just set the status as Unknown when no status are available + if len(pathResource.Status.Conditions) == 0 { + meta.SetStatusCondition( + &pathResource.Status.Conditions, + metav1.Condition{ + Type: s3v1alpha1.ConditionReconciled, + Status: metav1.ConditionUnknown, + ObservedGeneration: pathResource.Generation, + Reason: s3v1alpha1.Reconciling, + Message: "Starting reconciliation", + }, + ) + if err = r.Status().Update(ctx, pathResource); err != nil { + logger.Error( + err, + "Failed to update pathResource status", + "pathResourceName", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the s3InstanceResource Custom Resource after update the status + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, pathResource); err != nil { + logger.Error( + err, + "Failed to re-fetch pathResource", + "pathResourceName", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(pathResource, pathFinalizer) { + logger.Info("Adding finalizer to pathResource", "pathResourceName", + pathResource.Name, "NamespacedName", req.NamespacedName.String()) + if ok := controllerutil.AddFinalizer(pathResource, pathFinalizer); !ok { + logger.Error( + err, + "Failed to add finalizer into pathResource", + "pathResourceName", + pathResource.Name, 
"NamespacedName", req.NamespacedName.String()) + return ctrl.Result{Requeue: true}, nil + } + + if err := r.Update(ctx, pathResource); err != nil { + logger.Error( + err, + "an error occurred when adding finalizer on pathResource", + "pathResourceName", + pathResource.Name, "NamespacedName", req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + if err := r.Get(ctx, req.NamespacedName, pathResource); err != nil { + logger.Error( + err, + "Failed to re-fetch pathResource", + "pathResourceName", + pathResource.Name, "NamespacedName", req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Managing path deletion with a finalizer + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources + if pathResource.GetDeletionTimestamp() != nil { + logger.Info("pathResource have been marked for deletion", "pathResourceName", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.handleDeletion(ctx, req, pathResource) + } + + return r.handleReconciliation(ctx, req, pathResource) + +} + +func (r *PathReconciler) handleReconciliation( + ctx context.Context, + req reconcile.Request, + pathResource *s3v1alpha1.Path, +) (reconcile.Result, error) { + + logger := log.FromContext(ctx) + + // Create S3Client + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + pathResource.Name, + pathResource.Namespace, + pathResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // Path lifecycle management (other than deletion) starts here + + // Check bucket existence on the S3 server + bucketFound, err := s3Client.BucketExists(pathResource.Spec.BucketName) + if err != nil { + logger.Error( + err, + "an error occurred while checking the existence of a bucket", + "bucketName", + pathResource.Spec.BucketName, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.Unreachable, + "Error while checking if bucket already exist", + err, + ) + } + + // If bucket does not exist, the Path CR should be in a failing state + if !bucketFound { + errorBucketNotFound := fmt.Errorf( + "the path CR %s references a non-existing bucket : %s", + pathResource.Name, + pathResource.Spec.BucketName, + ) + logger.Error(errorBucketNotFound, errorBucketNotFound.Error(), "pathResourceName", + pathResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.CreationFailure, + fmt.Sprintf( + "The Path CR [%s] references a non-existing bucket [%s]", + pathResource.Name, + pathResource.Spec.BucketName, + ), + err, + ) + } + + // If the bucket exists, proceed to create or recreate the referenced paths + // For every path on the custom resource's spec, we check the path actually + // exists on the bucket on the S3 server, and create it if it doesn't + // TODO ? : the way this is naively implemented, it's probably costly. Maybe + // we can get the "effectiveBucket" (with its quota and paths) once at the beginning, + // and iterate on this instead of interrogating the S3 server twice for every path. 
+ // But then again, some buckets will likely be filled with many objects outside the + // scope of the CR, so getting all of them might be even more costly. + for _, pathInCr := range pathResource.Spec.Paths { + pathExists, err := s3Client.PathExists(pathResource.Spec.BucketName, pathInCr) + if err != nil { + logger.Error( + err, + "An error occurred while checking a path's existence for bucket ressource", + "path", + pathInCr, + "bucketName", + pathResource.Spec.BucketName, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.Unreachable, + fmt.Sprintf("The check for path [%s] in bucket has failed", pathInCr), + err, + ) + } + + if !pathExists { + err = s3Client.CreatePath(pathResource.Spec.BucketName, pathInCr) + if err != nil { + logger.Error( + err, + "An error occurred while creating a path for bucket ressource", + "path", + pathInCr, + "bucketName", + pathResource.Spec.BucketName, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.Unreachable, + fmt.Sprintf("The creation of path [%s] in bucket has failed", pathInCr), + err, + ) + } + } + } + + return r.SetReconciledCondition( + ctx, + req, + pathResource, + s3v1alpha1.Reconciled, + "Path reconciled", + nil, + ) +} diff --git a/internal/controller/path/reconcile_test.go b/internal/controller/path/reconcile_test.go new file mode 100644 index 0000000..52b5165 --- /dev/null +++ b/internal/controller/path/reconcile_test.go @@ -0,0 +1,156 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package path_controller_test + +import ( + "context" + "testing" + + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + path_controller "github.com/InseeFrLab/s3-operator/internal/controller/path" + "github.com/stretchr/testify/assert" +) + +func TestHandleCreate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + pathResource := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-path", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.PathSpec{ + BucketName: "existing-bucket", + Paths: []string{"mypath"}, + S3InstanceRef: "s3-operator/default", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, pathResource}) + + // Create the reconciler + reconciler := &path_controller.PathReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathResource.Name, Namespace: pathResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} + +func TestHandleUpdate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + pathResource := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-path", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.PathSpec{ + BucketName: "existing-bucket", + S3InstanceRef: "s3-operator/default", + Paths: []string{"example"}, + }, + } + + // Create a fake client with a sample CR + pathInvalidResource := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-invalid-paths", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.PathSpec{ + BucketName: "existing-invalid-bucket", + S3InstanceRef: "s3-operator/default", + Paths: []string{"example", "non-existing"}, + }, + } + + pathInvalidResource2 := &s3v1alpha1.Path{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-invalid-paths2", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.PathSpec{ + BucketName: "non-existing-bucket", + S3InstanceRef: "s3-operator/default", + Paths: []string{"example", "non-existing"}, + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, pathResource, pathInvalidResource, pathInvalidResource2}) + + // Create the reconciler + reconciler := &path_controller.PathReconciler{ + Client: testUtils.Client, + Scheme: 
testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathResource.Name, Namespace: pathResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("no error on invalid resource", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathInvalidResource.Name, Namespace: pathInvalidResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("failed create path on non existing bucket", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: pathInvalidResource2.Name, Namespace: pathInvalidResource2.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} diff --git a/internal/controller/path/status.go b/internal/controller/path/status.go new file mode 100644 index 0000000..2456a8c --- /dev/null +++ b/internal/controller/path/status.go @@ -0,0 +1,48 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package path_controller + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *PathReconciler) SetReconciledCondition( + ctx context.Context, + req ctrl.Request, + pathResource *s3v1alpha1.Path, + reason string, + message string, + err error, +) (ctrl.Result, error) { + return r.ControllerHelper.SetReconciledCondition( + ctx, + r.Status(), + req, + pathResource, + &pathResource.Status.Conditions, + s3v1alpha1.ConditionReconciled, + reason, + message, + err, + r.ReconcilePeriod, + ) + +} diff --git a/internal/controller/policy/constants.go b/internal/controller/policy/constants.go new file mode 100644 index 0000000..397d2f6 --- /dev/null +++ b/internal/controller/policy/constants.go @@ -0,0 +1,19 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller + +const policyFinalizer = "s3.onyxia.sh/finalizer" diff --git a/internal/controller/policy/controller.go b/internal/controller/policy/controller.go new file mode 100644 index 0000000..01be910 --- /dev/null +++ b/internal/controller/policy/controller.go @@ -0,0 +1,65 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller + +import ( + "time" + + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=policies/finalizers,verbs=update + +// PolicyReconciler reconciles a Policy object +type PolicyReconciler struct { + client.Client + Scheme *runtime.Scheme + ReconcilePeriod time.Duration + S3factory s3factory.S3Factory + ControllerHelper *helpers.ControllerHelper + S3Instancehelper *helpers.S3InstanceHelper +} + +// SetupWithManager sets up the controller with the Manager. +func (r *PolicyReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.Policy{}). + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + UpdateFunc: func(e event.UpdateEvent) bool { + // Only reconcile if generation has changed + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} diff --git a/internal/controller/policy/finalizer.go b/internal/controller/policy/finalizer.go new file mode 100644 index 0000000..dd9d015 --- /dev/null +++ b/internal/controller/policy/finalizer.go @@ -0,0 +1,131 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package policy_controller + +import ( + "bytes" + "context" + "encoding/json" + + "github.com/minio/madmin-go/v3" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *PolicyReconciler) finalizePolicy( + ctx context.Context, + policyResource *s3v1alpha1.Policy, +) error { + logger := log.FromContext(ctx) + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + policyResource.Name, + policyResource.Namespace, + policyResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return err + } + if s3Client.GetConfig().PolicyDeletionEnabled { + return s3Client.DeletePolicy(policyResource.Spec.Name) + } + return nil +} + +func (r *PolicyReconciler) handleDeletion( + ctx context.Context, + req reconcile.Request, + policyResource *s3v1alpha1.Policy, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + if controllerutil.ContainsFinalizer(policyResource, policyFinalizer) { + // Run finalization logic for policyFinalizer. If the + // finalization logic fails, don't remove the finalizer so + // that we can retry during the next reconciliation. + if err := r.finalizePolicy(ctx, policyResource); err != nil { + logger.Error( + err, + "An error occurred when attempting to finalize the policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.DeletionFailure, + "Policy deletion has failed", + err, + ) + } + + if ok := controllerutil.RemoveFinalizer(policyResource, policyFinalizer); !ok { + logger.Info( + "Failed to remove finalizer for policyResource", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + if err := r.Update(ctx, policyResource); err != nil { + logger.Error( + err, + "an error occurred when removing finalizer from policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil +} + +func (r *PolicyReconciler) isPolicyMatchingWithCustomResource( + policyResource *s3v1alpha1.Policy, + effectivePolicy *madmin.PolicyInfo, +) (bool, error) { + // The policy content visible in the custom resource usually contains indentations and newlines + // while the one we get from S3 is compacted. In order to compare them, we compact the former. + policyResourceAsByteSlice := []byte(policyResource.Spec.PolicyContent) + buffer := new(bytes.Buffer) + err := json.Compact(buffer, policyResourceAsByteSlice) + if err != nil { + return false, err + } + + // Another gotcha is that the effective policy comes up as a json.RawContent, + // which needs marshalling in order to be properly compared to the []byte we get from the CR. + marshalled, err := json.Marshal(effectivePolicy.Policy) + if err != nil { + return false, err + } + return bytes.Equal(buffer.Bytes(), marshalled), nil +} diff --git a/internal/controller/policy/finalizer_test.go b/internal/controller/policy/finalizer_test.go new file mode 100644 index 0000000..4f4a24f --- /dev/null +++ b/internal/controller/policy/finalizer_test.go @@ -0,0 +1,90 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller_test + +import ( + "context" + "testing" + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + policy_controller "github.com/InseeFrLab/s3-operator/internal/controller/policy" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleDelete(t *testing.T) { + + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + policyResource := &s3v1alpha1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-policy", + Namespace: "default", + Generation: 1, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + }, + Spec: s3v1alpha1.PolicySpec{ + Name: "example-policy", + S3InstanceRef: "s3-operator/default", + PolicyContent: "", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, policyResource}) + + // Create the reconciler + reconciler := &policy_controller.PolicyReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: policyResource.Name, Namespace: policyResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("ressource have been deleted", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: policyResource.Name, Namespace: policyResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + policy := &s3v1alpha1.Policy{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "example-policy", + }, policy) + assert.NotNil(t, err) + assert.ErrorContains(t, err, "policies.s3.onyxia.sh \"example-policy\" not found") + }) + +} diff --git a/internal/controller/policy/reconcile.go b/internal/controller/policy/reconcile.go new file mode 100644 index 0000000..678f06f --- /dev/null +++ b/internal/controller/policy/reconcile.go @@ -0,0 +1,389 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller + +import ( + "context" + "fmt" + + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. +// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *PolicyReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Checking for policy resource existence + policyResource := &s3v1alpha1.Policy{} + err := r.Get(ctx, req.NamespacedName, policyResource) + if err != nil { + if k8sapierrors.IsNotFound(err) { + logger.Info( + "The Policy custom resource has been removed ; as such the Policy controller is NOOP.", + "req.Name", + req.Name, + ) + return ctrl.Result{}, nil + } + logger.Error( + err, + "An error occurred when attempting to read the Policy resource from the Kubernetes cluster", + ) + return ctrl.Result{}, err + } + + // Let's just set the status as Unknown when no status are available + if len(policyResource.Status.Conditions) == 0 { + meta.SetStatusCondition( + &policyResource.Status.Conditions, + metav1.Condition{ + Type: s3v1alpha1.ConditionReconciled, + Status: metav1.ConditionUnknown, + ObservedGeneration: policyResource.Generation, + Reason: s3v1alpha1.Reconciling, + Message: "Starting reconciliation", + }, + ) + if err = r.Status().Update(ctx, policyResource); err != nil { + logger.Error( + err, + "Failed to update bucketRessource status", + "bucketName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the policyResource Custom Resource after update the status + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, policyResource); err != nil { + logger.Error( + err, + "Failed to re-fetch policyResource", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(policyResource, policyFinalizer) { + logger.Info("Adding finalizer to policy resource", "PolicyName", + policyResource.Spec.Name, "NamespacedName", req.NamespacedName.String()) + if ok := controllerutil.AddFinalizer(policyResource, policyFinalizer); !ok { + logger.Error( + err, + "Failed to add finalizer into policy resource", + 
"policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + err = r.Update(ctx, policyResource) + if err != nil { + logger.Error( + err, + "An error occurred when adding finalizer from policyResource", + "policyResource", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the policy Custom Resource after adding the finalizer + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, policyResource); err != nil { + logger.Error( + err, + "Failed to re-fetch policyResource", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Managing policy deletion with a finalizer + // REF : https://sdk.operatorframework.io/docs/building-operators/golang/advanced-topics/#external-resources + if policyResource.GetDeletionTimestamp() != nil { + logger.Info("policyResource have been marked for deletion", "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.handleDeletion(ctx, req, policyResource) + } + + // Policy lifecycle management (other than deletion) starts here + return r.handleReconciliation(ctx, req, policyResource) + +} + +func (r *PolicyReconciler) handleReconciliation( + ctx context.Context, + req reconcile.Request, + policyResource *s3v1alpha1.Policy, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + policyResource.Name, + policyResource.Namespace, + policyResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // Check policy existence on the S3 server + effectivePolicy, err := s3Client.GetPolicyInfo(policyResource.Spec.Name) + if err != nil { + logger.Error( + err, + "An error occurred while checking the existence of a policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + "Error while checking if policy already exist", + err, + ) + } + + if effectivePolicy == nil { + return r.handleCreation(ctx, req, policyResource) + } + + // If the policy exists on S3, we compare its state to the custom resource that spawned it on K8S + return r.handleUpdate(ctx, req, policyResource) +} + +func (r *PolicyReconciler) handleUpdate( + ctx context.Context, + req reconcile.Request, + policyResource *s3v1alpha1.Policy, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + policyResource.Name, + policyResource.Namespace, + policyResource.Spec.S3InstanceRef, + ) + + if err != nil { + logger.Error(err, "an error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + 
"Failed to generate s3client from instance", + err, + ) + } + + // Check policy existence on the S3 server + effectivePolicy, err := s3Client.GetPolicyInfo(policyResource.Spec.Name) + if err != nil { + logger.Error( + err, + "An error occurred while checking the existence of a policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + "Error while checking if policy already exist", + err, + ) + } + + matching, err := r.isPolicyMatchingWithCustomResource(policyResource, effectivePolicy) + if err != nil { + logger.Error( + err, + "An error occurred while comparing actual and expected configuration for the policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "The comparison between the effective policy [%s] on S3 and its corresponding custom resource on K8S has failed", + policyResource.Spec.Name, + ), + err, + ) + } + + if !matching { + // If not we update the policy to match the CR + err = s3Client.CreateOrUpdatePolicy( + policyResource.Spec.Name, + policyResource.Spec.PolicyContent, + ) + if err != nil { + logger.Error( + err, + "An error occurred while updating the policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "The comparison between the effective policy [%s] on S3 and its corresponding custom resource on K8S has failed", + policyResource.Spec.Name, + ), + err, + ) + } + } + + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Reconciled, + "Policy reconciled", + nil, + ) +} + +func (r *PolicyReconciler) handleCreation(ctx context.Context, req reconcile.Request, + policyResource *s3v1alpha1.Policy) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + policyResource.Name, + policyResource.Namespace, + policyResource.Spec.S3InstanceRef, + ) + + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + err = s3Client.CreateOrUpdatePolicy( + policyResource.Spec.Name, + policyResource.Spec.PolicyContent, + ) + + if err != nil { + logger.Error( + err, + "An error occurred while creating the policy", + "policyName", + policyResource.Spec.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Unreachable, + "Error while creating policy", + err, + ) + } + + return r.SetReconciledCondition( + ctx, + req, + policyResource, + s3v1alpha1.Reconciled, + "Policy reconciled", + err, + ) +} diff --git a/internal/controller/policy/reconcile_test.go b/internal/controller/policy/reconcile_test.go new file mode 100644 index 0000000..1b363d2 --- /dev/null +++ b/internal/controller/policy/reconcile_test.go @@ -0,0 +1,169 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller_test + +import ( + "context" + "testing" + + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + policy_controller "github.com/InseeFrLab/s3-operator/internal/controller/policy" + "github.com/stretchr/testify/assert" +) + +func TestHandleCreate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + policyResource := &s3v1alpha1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-policy", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.PolicySpec{ + Name: "example-policy", + S3InstanceRef: "s3-operator/default", + PolicyContent: "", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, policyResource}) + + // Create the reconciler + reconciler := &policy_controller.PolicyReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: policyResource.Name, Namespace: policyResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} + +func TestHandleUpdate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + existingValidPolicy := []byte(`{ +"Version": "2012-10-17", +"Statement": [ +{ + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Resource": "arn:aws:s3:::my-bucket/*" +} +] +}`) + + existingInvalidPolicy := []byte(`{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Resource": "arn:aws:s3:::my-bucket2/*" + } + ] + }`) + + // Create a fake client with a sample CR + policyResource := &s3v1alpha1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-policy", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.PolicySpec{ + Name: "existing-policy", + S3InstanceRef: "s3-operator/default", + PolicyContent: string(existingValidPolicy), + }, + } + + // Create a fake client with a sample CR + policyInvalidResource := &s3v1alpha1.Policy{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-invalid-policy", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.PolicySpec{ + Name: "existing-policy", + S3InstanceRef: "s3-operator/default", + PolicyContent: 
string(existingInvalidPolicy), + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, policyResource, policyInvalidResource}) + + // Create the reconciler + reconciler := &policy_controller.PolicyReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: policyResource.Name, Namespace: policyResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: policyInvalidResource.Name, Namespace: policyInvalidResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) +} diff --git a/internal/controller/policy/status.go b/internal/controller/policy/status.go new file mode 100644 index 0000000..ba780b7 --- /dev/null +++ b/internal/controller/policy/status.go @@ -0,0 +1,47 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package policy_controller + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *PolicyReconciler) SetReconciledCondition( + ctx context.Context, + req ctrl.Request, + policyResource *s3v1alpha1.Policy, + reason string, + message string, + err error, +) (ctrl.Result, error) { + return r.ControllerHelper.SetReconciledCondition( + ctx, + r.Status(), + req, + policyResource, + &policyResource.Status.Conditions, + s3v1alpha1.ConditionReconciled, + reason, + message, + err, + r.ReconcilePeriod, + ) +} diff --git a/internal/controller/s3instance/constants.go b/internal/controller/s3instance/constants.go new file mode 100644 index 0000000..8df1ac6 --- /dev/null +++ b/internal/controller/s3instance/constants.go @@ -0,0 +1,21 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3instance_controller + +const ( + s3InstanceFinalizer = "s3.onyxia.sh/finalizer" +) diff --git a/internal/controller/s3instance/controller.go b/internal/controller/s3instance/controller.go new file mode 100644 index 0000000..bbd8fc2 --- /dev/null +++ b/internal/controller/s3instance/controller.go @@ -0,0 +1,66 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3instance_controller + +import ( + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" +) + +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3instances,verbs=get;list;watch;create;update;patch;delete +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3instances/status,verbs=get;update;patch +//+kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3instances/finalizers,verbs=update + +// S3InstanceReconciler reconciles a S3Instance object +type S3InstanceReconciler struct { + client.Client + Scheme *runtime.Scheme + ReconcilePeriod time.Duration + S3factory s3factory.S3Factory + ControllerHelper *helpers.ControllerHelper + S3Instancehelper *helpers.S3InstanceHelper +} + +// SetupWithManager sets up the controller with the Manager.* +func (r *S3InstanceReconciler) SetupWithManager(mgr ctrl.Manager) error { + // filterLogger := ctrl.Log.WithName("filterEvt") + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.S3Instance{}). + // See : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + // Ignore updates to CR status in which case metadata.Generation does not change, + // unless it is a change to the underlying Secret + UpdateFunc: func(e event.UpdateEvent) bool { + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} diff --git a/internal/controller/s3instance/finalizer.go b/internal/controller/s3instance/finalizer.go new file mode 100644 index 0000000..2a5dae4 --- /dev/null +++ b/internal/controller/s3instance/finalizer.go @@ -0,0 +1,138 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s3instance_controller
+
+import (
+	"context"
+	"fmt"
+
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1"
+)
+
+func (r *S3InstanceReconciler) handleS3InstanceDeletion(
+	ctx context.Context,
+	req ctrl.Request,
+	s3InstanceResource *s3v1alpha1.S3Instance,
+) (reconcile.Result, error) {
+	logger := log.FromContext(ctx)
+
+	if controllerutil.ContainsFinalizer(s3InstanceResource, s3InstanceFinalizer) {
+		logger.Info(
+			"Performing Finalizer Operations for S3Instance before delete CR",
+			"Namespace",
+			s3InstanceResource.GetNamespace(),
+			"Name",
+			s3InstanceResource.GetName(),
+		)
+
+		// Check for existing references to this instance
+		if err := r.checkS3InstanceReferences(ctx, s3InstanceResource); err != nil {
+			return ctrl.Result{}, err
+		}
+
+		// Remove s3InstanceFinalizer. Once all finalizers have been removed, the object will be deleted.
+		if ok := controllerutil.RemoveFinalizer(s3InstanceResource, s3InstanceFinalizer); !ok {
+			logger.Info(
+				"Failed to remove finalizer for S3Instance",
+				"NamespacedName",
+				req.NamespacedName.String(),
+			)
+			return ctrl.Result{Requeue: true}, nil
+		}
+
+		// Let's re-fetch the S3Instance Custom Resource after removing the finalizer
+		// so that we have the latest state of the resource on the cluster and we will avoid
+		// raise the issue "the object has been modified, please apply
+		// your changes to the latest version and try again" which would re-trigger the reconciliation
+		// if we try to update it again in the following operations
+		if err := r.Update(ctx, s3InstanceResource); err != nil {
+			logger.Error(
+				err,
+				"Failed to remove finalizer for S3Instance",
+				"NamespacedName",
+				req.NamespacedName.String(),
+			)
+			return ctrl.Result{}, err
+		}
+	}
+	return ctrl.Result{}, nil
+}
+
+// checkS3InstanceReferences checks whether the S3 instance is still in use by other resources
+func (r *S3InstanceReconciler) checkS3InstanceReferences(ctx context.Context, s3Instance *s3v1alpha1.S3Instance) error {
+	// Resource types to check for remaining references
+	references := map[string]client.ObjectList{
+		"Buckets":  &s3v1alpha1.BucketList{},
+		"Policies": &s3v1alpha1.PolicyList{},
+		"Paths":    &s3v1alpha1.PathList{},
+		"S3Users":  &s3v1alpha1.S3UserList{},
+	}
+
+	for name, list := range references {
+		if err := r.List(ctx, list); err != nil {
+			return fmt.Errorf("échec de la récupération des %s : %w", name, err)
+		}
+
+		if found := r.countReferences(list, s3Instance); found > 0 {
+			return fmt.Errorf("impossible de supprimer S3Instance, %d %s utilisent cette instance", found, name)
+		}
+	}
+	return nil
+}
+
+// countReferences counts the objects referencing a given S3Instance
+func (r *S3InstanceReconciler) countReferences(list client.ObjectList, s3Instance *s3v1alpha1.S3Instance) int {
+	count := 0
+	switch objects := list.(type) {
+	case *s3v1alpha1.BucketList:
+		for _, obj := range objects.Items {
+			if 
r.S3Instancehelper.GetS3InstanceRefInfo(obj.Spec.S3InstanceRef, obj.Namespace). + String() == fmt.Sprintf("%s/%s", s3Instance.Namespace, s3Instance.Name) { + count++ + } + } + case *s3v1alpha1.PathList: + for _, obj := range objects.Items { + if r.S3Instancehelper.GetS3InstanceRefInfo(obj.Spec.S3InstanceRef, obj.Namespace). + String() == fmt.Sprintf("%s/%s", s3Instance.Namespace, s3Instance.Name) { + count++ + } + } + case *s3v1alpha1.S3UserList: + for _, obj := range objects.Items { + if r.S3Instancehelper.GetS3InstanceRefInfo(obj.Spec.S3InstanceRef, obj.Namespace). + String() == fmt.Sprintf("%s/%s", s3Instance.Namespace, s3Instance.Name) { + count++ + } + } + case *s3v1alpha1.PolicyList: + for _, obj := range objects.Items { + if r.S3Instancehelper.GetS3InstanceRefInfo(obj.Spec.S3InstanceRef, obj.Namespace). + String() == fmt.Sprintf("%s/%s", s3Instance.Namespace, s3Instance.Name) { + count++ + } + } + } + + return count +} diff --git a/internal/controller/s3instance/finalizer_test.go b/internal/controller/s3instance/finalizer_test.go new file mode 100644 index 0000000..3ef63d0 --- /dev/null +++ b/internal/controller/s3instance/finalizer_test.go @@ -0,0 +1,335 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3instance_controller_test + +import ( + "context" + "testing" + "time" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + s3instance_controller "github.com/InseeFrLab/s3-operator/internal/controller/s3instance" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleDelete(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + t.Run("no error", func(t *testing.T) { + s3instanceResource := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + } + + secretResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + 
testUtils.SetupClient([]client.Object{s3instanceResource, secretResource})
+
+		// Create the reconciler
+		reconciler := &s3instance_controller.S3InstanceReconciler{
+			Client:    testUtils.Client,
+			Scheme:    testUtils.Client.Scheme(),
+			S3factory: testUtils.S3Factory,
+		}
+
+		// Call Reconcile function
+		req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}}
+		_, err := reconciler.Reconcile(context.TODO(), req)
+		assert.NoError(t, err)
+	})
+
+	t.Run("error if one bucket resource still uses it", func(t *testing.T) {
+		s3instanceResource := &s3v1alpha1.S3Instance{
+			Spec: s3v1alpha1.S3InstanceSpec{
+				AllowedNamespaces:     []string{"default", "test-*", "*-namespace", "*allowed*"},
+				Url:                   "https://minio.example.com",
+				S3Provider:            "minio",
+				Region:                "us-east-1",
+				BucketDeletionEnabled: true,
+				S3UserDeletionEnabled: true,
+				PathDeletionEnabled:   true,
+				PolicyDeletionEnabled: true,
+				SecretRef:             "minio-credentials",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:              "default",
+				Namespace:         "s3-operator",
+				Finalizers:        []string{"s3.onyxia.sh/finalizer"},
+				DeletionTimestamp: &metav1.Time{Time: time.Now()},
+			},
+		}
+
+		secretResource := &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "minio-credentials",
+				Namespace: "s3-operator",
+			},
+			StringData: map[string]string{
+				"accessKey": "access_key_value",
+				"secretKey": "secret_key_value",
+			},
+		}
+
+		bucketResource := &s3v1alpha1.Bucket{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "bucket",
+				Namespace: "default",
+			},
+			Spec: s3v1alpha1.BucketSpec{
+				Name:          "bucket",
+				S3InstanceRef: "s3-operator/default",
+			},
+		}
+
+		// Add mock for s3Factory and client
+		testUtils := TestUtils.NewTestUtils()
+		testUtils.SetupMockedS3FactoryAndClient()
+		testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, bucketResource})
+
+		// Create the reconciler
+		reconciler := &s3instance_controller.S3InstanceReconciler{
+			Client:    testUtils.Client,
+			Scheme:    testUtils.Client.Scheme(),
+			S3factory: testUtils.S3Factory,
+		}
+
+		// Call Reconcile function
+		req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}}
+		_, err := reconciler.Reconcile(context.TODO(), req)
+		assert.Error(t, err)
+		assert.EqualErrorf(t, err, "cannot delete S3Instance, 1 Buckets still reference this instance", err.Error())
+	})
+
+	t.Run("error if one policy resource still uses it", func(t *testing.T) {
+		s3instanceResource := &s3v1alpha1.S3Instance{
+			Spec: s3v1alpha1.S3InstanceSpec{
+				AllowedNamespaces:     []string{"default", "test-*", "*-namespace", "*allowed*"},
+				Url:                   "https://minio.example.com",
+				S3Provider:            "minio",
+				Region:                "us-east-1",
+				BucketDeletionEnabled: true,
+				S3UserDeletionEnabled: true,
+				PathDeletionEnabled:   true,
+				PolicyDeletionEnabled: true,
+				SecretRef:             "minio-credentials",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:              "default",
+				Namespace:         "s3-operator",
+				Finalizers:        []string{"s3.onyxia.sh/finalizer"},
+				DeletionTimestamp: &metav1.Time{Time: time.Now()},
+			},
+		}
+
+		secretResource := &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "minio-credentials",
+				Namespace: "s3-operator",
+			},
+			StringData: map[string]string{
+				"accessKey": "access_key_value",
+				"secretKey": "secret_key_value",
+			},
+		}
+
+		policyResource := &s3v1alpha1.Policy{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "policy",
+				Namespace: "default",
+			},
+			Spec: s3v1alpha1.PolicySpec{
+				S3InstanceRef: "s3-operator/default",
+			},
+		}
+
+		// Add mock for s3Factory and client
+		testUtils := TestUtils.NewTestUtils()
+		testUtils.SetupMockedS3FactoryAndClient()
+		testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, policyResource})
+
+		// Create the reconciler
+		reconciler := &s3instance_controller.S3InstanceReconciler{
+			Client:    testUtils.Client,
+			Scheme:    testUtils.Client.Scheme(),
+			S3factory: testUtils.S3Factory,
+		}
+
+		// Call Reconcile function
+		req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}}
+		_, err := reconciler.Reconcile(context.TODO(), req)
+		assert.Error(t, err)
+		assert.EqualErrorf(t, err, "cannot delete S3Instance, 1 Policies still reference this instance", err.Error())
+	})
+
+	t.Run("error if one path resource still uses it", func(t *testing.T) {
+		s3instanceResource := &s3v1alpha1.S3Instance{
+			Spec: s3v1alpha1.S3InstanceSpec{
+				AllowedNamespaces:     []string{"default", "test-*", "*-namespace", "*allowed*"},
+				Url:                   "https://minio.example.com",
+				S3Provider:            "minio",
+				Region:                "us-east-1",
+				BucketDeletionEnabled: true,
+				S3UserDeletionEnabled: true,
+				PathDeletionEnabled:   true,
+				PolicyDeletionEnabled: true,
+				SecretRef:             "minio-credentials",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:              "default",
+				Namespace:         "s3-operator",
+				Finalizers:        []string{"s3.onyxia.sh/finalizer"},
+				DeletionTimestamp: &metav1.Time{Time: time.Now()},
+			},
+		}
+
+		secretResource := &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "minio-credentials",
+				Namespace: "s3-operator",
+			},
+			StringData: map[string]string{
+				"accessKey": "access_key_value",
+				"secretKey": "secret_key_value",
+			},
+		}
+
+		pathResource := &s3v1alpha1.Path{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "path",
+				Namespace: "default",
+			},
+			Spec: s3v1alpha1.PathSpec{
+				S3InstanceRef: "s3-operator/default",
+			},
+		}
+
+		// Add mock for s3Factory and client
+		testUtils := TestUtils.NewTestUtils()
+		testUtils.SetupMockedS3FactoryAndClient()
+		testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, pathResource})
+
+		// Create the reconciler
+		reconciler := &s3instance_controller.S3InstanceReconciler{
+			Client:    testUtils.Client,
+			Scheme:    testUtils.Client.Scheme(),
+			S3factory: testUtils.S3Factory,
+		}
+
+		// Call Reconcile function
+		req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}}
+		_, err := reconciler.Reconcile(context.TODO(), req)
+		assert.Error(t, err)
+		assert.EqualErrorf(t, err, "cannot delete S3Instance, 1 Paths still reference this instance", err.Error())
+	})
+
+	t.Run("error if one user resource still uses it", func(t *testing.T) {
+		s3instanceResource := &s3v1alpha1.S3Instance{
+			Spec: s3v1alpha1.S3InstanceSpec{
+				AllowedNamespaces:     []string{"default", "test-*", "*-namespace", "*allowed*"},
+				Url:                   "https://minio.example.com",
+				S3Provider:            "minio",
+				Region:                "us-east-1",
+				BucketDeletionEnabled: true,
+				S3UserDeletionEnabled: true,
+				PathDeletionEnabled:   true,
+				PolicyDeletionEnabled: true,
+				SecretRef:             "minio-credentials",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Name:              "default",
+				Namespace:         "s3-operator",
+				Finalizers:        []string{"s3.onyxia.sh/finalizer"},
+				DeletionTimestamp: &metav1.Time{Time: time.Now()},
+			},
+		}
+
+		secretResource := &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "minio-credentials",
+				Namespace: "s3-operator",
+			},
+			StringData: map[string]string{
+				"accessKey": "access_key_value",
+				"secretKey": "secret_key_value",
+			},
+		}
+
+		userResource := &s3v1alpha1.S3User{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      "user",
+				Namespace: "default",
+			},
+			Spec: s3v1alpha1.S3UserSpec{
+				S3InstanceRef: "s3-operator/default",
+			},
+		}
+
+		// Add mock for s3Factory and client
+		testUtils := TestUtils.NewTestUtils()
+		testUtils.SetupMockedS3FactoryAndClient()
+		testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, userResource})
+
+		// Create the reconciler
+		reconciler := &s3instance_controller.S3InstanceReconciler{
+			Client:    testUtils.Client,
+			Scheme:    testUtils.Client.Scheme(),
+			S3factory: testUtils.S3Factory,
+		}
+
+		// Call Reconcile function
+		req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}}
+		_, err := reconciler.Reconcile(context.TODO(), req)
+		assert.Error(t, err)
+		assert.EqualErrorf(t, err, "cannot delete S3Instance, 1 S3Users still reference this instance", err.Error())
+	})
+}
diff --git a/internal/controller/s3instance/reconcile.go b/internal/controller/s3instance/reconcile.go
new file mode 100644
index 0000000..548cf3e
--- /dev/null
+++ b/internal/controller/s3instance/reconcile.go
@@ -0,0 +1,201 @@
+/*
+Copyright 2023.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package s3instance_controller
+
+import (
+	"context"
+	"fmt"
+
+	k8sapierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+	"sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+	s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1"
+)
+
+// Reconcile is part of the main kubernetes reconciliation loop which aims to
+// move the current state of the cluster closer to the desired state.
+//
+// For more details, check Reconcile and its Result here:
+// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile
+func (r *S3InstanceReconciler) Reconcile(
+	ctx context.Context,
+	req ctrl.Request,
+) (ctrl.Result, error) {
+	logger := log.FromContext(ctx)
+
+	// Checking for s3InstanceResource existence
+	s3InstanceResource := &s3v1alpha1.S3Instance{}
+	err := r.Get(ctx, req.NamespacedName, s3InstanceResource)
+	if err != nil {
+		if k8sapierrors.IsNotFound(err) {
+			logger.Info(
+				fmt.Sprintf("The S3InstanceResource CR %s has been removed. 
NOOP", req.Name), + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, nil + } + logger.Error( + err, + "Failed to get S3InstanceResource", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's just set the status as Unknown when no status are available + if len(s3InstanceResource.Status.Conditions) == 0 { + meta.SetStatusCondition( + &s3InstanceResource.Status.Conditions, + metav1.Condition{ + Type: s3v1alpha1.ConditionReconciled, + Status: metav1.ConditionUnknown, + ObservedGeneration: s3InstanceResource.Generation, + Reason: s3v1alpha1.Reconciling, + Message: "Starting reconciliation", + }, + ) + if err = r.Status().Update(ctx, s3InstanceResource); err != nil { + logger.Error( + err, + "Failed to update s3InstanceResource status", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the s3InstanceResource Custom Resource after update the status + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, s3InstanceResource); err != nil { + logger.Error( + err, + "Failed to re-fetch s3Instance", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(s3InstanceResource, s3InstanceFinalizer) { + logger.Info("Adding finalizer to s3Instance", "NamespacedName", req.NamespacedName.String()) + if ok := controllerutil.AddFinalizer(s3InstanceResource, s3InstanceFinalizer); !ok { + logger.Error( + err, + "Failed to add finalizer into the s3Instance", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + if err = r.Update(ctx, s3InstanceResource); err != nil { + logger.Error( + err, + "an error occurred when adding finalizer on s3Instance", + "s3Instance", + s3InstanceResource.Name, + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the S3Instance Custom Resource after adding the finalizer + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, s3InstanceResource); err != nil { + logger.Error( + err, + "Failed to re-fetch s3Instance", + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + } + + // Check if the s3InstanceResource instance is marked to be deleted, which is + // indicated by the deletion timestamp being set. The object will be deleted. 
+ if s3InstanceResource.GetDeletionTimestamp() != nil { + logger.Info("s3InstanceResource have been marked for deletion") + return r.handleS3InstanceDeletion(ctx, req, s3InstanceResource) + } + + // Reconciliation starts here + return r.handleReconciliation(ctx, req, s3InstanceResource) + +} + +func (r *S3InstanceReconciler) handleReconciliation( + ctx context.Context, + req reconcile.Request, + s3InstanceResource *s3v1alpha1.S3Instance, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientFromS3Instance(ctx, r.Client, r.S3factory, s3InstanceResource) + + if err != nil { + logger.Error( + err, + "Could not generate s3Instance", + "s3InstanceSecretRefName", + s3InstanceResource.Spec.SecretRef, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition(ctx, req, s3InstanceResource, s3v1alpha1.Unreachable, + "Failed to generate S3Instance ", err) + } + + _, err = s3Client.ListBuckets() + if err != nil { + logger.Error( + err, + "Could not generate s3Instance", + "s3InstanceName", + s3InstanceResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition(ctx, req, s3InstanceResource, s3v1alpha1.CreationFailure, + "Failed to generate S3Instance ", err) + } + + return r.SetReconciledCondition( + ctx, + req, + s3InstanceResource, + s3v1alpha1.Reconciled, + "S3Instance instance reconciled", + nil, + ) + +} diff --git a/internal/controller/s3instance/reconcile_test.go b/internal/controller/s3instance/reconcile_test.go new file mode 100644 index 0000000..94ee103 --- /dev/null +++ b/internal/controller/s3instance/reconcile_test.go @@ -0,0 +1,156 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3instance_controller_test + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + s3instance_controller "github.com/InseeFrLab/s3-operator/internal/controller/s3instance" + "github.com/stretchr/testify/assert" +) + +func TestHandleCreate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + s3instanceResourceInvalid := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.invalid.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "invalid-instance", + Namespace: "s3-operator", + }, + } + + s3instanceResource := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + }, + } + + secretResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3instanceResourceInvalid}) + + // Create the reconciler + reconciler := &s3instance_controller.S3InstanceReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("finalizer is added", func(t *testing.T) { + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + + // FetchReconciledInstance + reconciledInstance := &s3v1alpha1.S3Instance{} + _ = testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "s3-operator", + Name: "default", + }, reconciledInstance) + + assert.Equal(t, "s3.onyxia.sh/finalizer", reconciledInstance.Finalizers[0]) + }) + + t.Run("status is reconciled", func(t *testing.T) { + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + + 
// FetchReconciledInstance + reconciledInstance := &s3v1alpha1.S3Instance{} + _ = testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "s3-operator", + Name: "default", + }, reconciledInstance) + + assert.Equal(t, "Reconciled", reconciledInstance.Status.Conditions[0].Reason) + }) + + t.Run("reason is creation failure because of invalid client", func(t *testing.T) { + + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResourceInvalid.Name, Namespace: s3instanceResourceInvalid.Namespace}} + reconciler.Reconcile(context.TODO(), req) + + // 4️⃣ FetchReconciledInstance + reconciledInstance := &s3v1alpha1.S3Instance{} + _ = testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "s3-operator", + Name: "invalid-instance", + }, reconciledInstance) + + assert.Equal(t, "CreationFailure", reconciledInstance.Status.Conditions[0].Reason) + }) +} diff --git a/internal/controller/s3instance/status.go b/internal/controller/s3instance/status.go new file mode 100644 index 0000000..ad82e68 --- /dev/null +++ b/internal/controller/s3instance/status.go @@ -0,0 +1,47 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3instance_controller + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *S3InstanceReconciler) SetReconciledCondition( + ctx context.Context, + req ctrl.Request, + s3InstanceResource *s3v1alpha1.S3Instance, + reason string, + message string, + err error, +) (ctrl.Result, error) { + return r.ControllerHelper.SetReconciledCondition( + ctx, + r.Status(), + req, + s3InstanceResource, + &s3InstanceResource.Status.Conditions, + s3v1alpha1.ConditionReconciled, + reason, + message, + err, + r.ReconcilePeriod, + ) +} diff --git a/controllers/suite_test.go b/internal/controller/suite_test.go similarity index 73% rename from controllers/suite_test.go rename to internal/controller/suite_test.go index a4ae390..53390d2 100644 --- a/controllers/suite_test.go +++ b/internal/controller/suite_test.go @@ -14,10 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package controllers +package controller import ( + "fmt" "path/filepath" + "runtime" "testing" . "github.com/onsi/ginkgo/v2" @@ -41,7 +43,7 @@ var cfg *rest.Config var k8sClient client.Client var testEnv *envtest.Environment -func TestAPIs(t *testing.T) { +func TestControllers(t *testing.T) { RegisterFailHandler(Fail) RunSpecs(t, "Controller Suite") @@ -52,8 +54,16 @@ var _ = BeforeSuite(func() { By("bootstrapping test environment") testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "config", "crd", "bases")}, + CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, ErrorIfCRDPathMissing: true, + + // The BinaryAssetsDirectory is only required if you want to run the tests directly + // without call the makefile target test. 
If not informed it will look for the + // default path defined in controller-runtime which is /usr/local/kubebuilder/. + // Note that you must have the required binaries setup under the bin directory to perform + // the tests directly. When we run make test it will be setup and used automatically. + BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", + fmt.Sprintf("1.29.0-%s-%s", runtime.GOOS, runtime.GOARCH)), } var err error diff --git a/internal/controller/user/constants.go b/internal/controller/user/constants.go new file mode 100644 index 0000000..650d1d0 --- /dev/null +++ b/internal/controller/user/constants.go @@ -0,0 +1,21 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user_controller + +const ( + userFinalizer = "s3.onyxia.sh/userFinalizer" +) diff --git a/internal/controller/user/controller.go b/internal/controller/user/controller.go new file mode 100644 index 0000000..717ceb4 --- /dev/null +++ b/internal/controller/user/controller.go @@ -0,0 +1,93 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package user_controller + +import ( + "time" + + "github.com/InseeFrLab/s3-operator/internal/helpers" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/event" + "sigs.k8s.io/controller-runtime/pkg/predicate" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3users,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3users/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=s3.onyxia.sh,resources=s3users/finalizers,verbs=update +// +kubebuilder:rbac:groups="",resources=secrets,verbs=get;list;watch;create;update;delete +// +kubebuilder:rbac:groups="",resources=secrets/status,verbs=get;update;patch +// +kubebuilder:rbac:groups="",resources=secrets/finalizers,verbs=update + +// S3UserReconciler reconciles a S3User object +type S3UserReconciler struct { + client.Client + Scheme *runtime.Scheme + OverrideExistingSecret bool + ReconcilePeriod time.Duration + S3factory s3factory.S3Factory + ControllerHelper *helpers.ControllerHelper + S3Instancehelper *helpers.S3InstanceHelper + PasswordGeneratorHelper *helpers.PasswordGenerator +} + +// SetupWithManager sets up the controller with the Manager.* +func (r *S3UserReconciler) SetupWithManager(mgr ctrl.Manager) error { + // filterLogger := ctrl.Log.WithName("filterEvt") + return ctrl.NewControllerManagedBy(mgr). + For(&s3v1alpha1.S3User{}). + // The "secret owning" implies the reconcile loop will be called whenever a Secret owned + // by a S3User is created/updated/deleted. In other words, even when creating a single S3User, + // there is going to be several iterations. + Owns(&corev1.Secret{}). + // See : https://sdk.operatorframework.io/docs/building-operators/golang/references/event-filtering/ + WithEventFilter(predicate.Funcs{ + + // Ignore updates to CR status in which case metadata.Generation does not change, + // unless it is a change to the underlying Secret + UpdateFunc: func(e event.UpdateEvent) bool { + + // To check if the update event is tied to a change on secret, + // we try to cast e.ObjectNew to a secret (only if it's not a S3User, which + // should prevent any TypeAssertionError based panic). + secretUpdate := false + newUser, _ := e.ObjectNew.(*s3v1alpha1.S3User) + if newUser == nil { + secretUpdate = (e.ObjectNew.(*corev1.Secret) != nil) + } + + return e.ObjectOld.GetGeneration() != e.ObjectNew.GetGeneration() || secretUpdate + }, + // Ignore create events caused by the underlying secret's creation + CreateFunc: func(e event.CreateEvent) bool { + user, _ := e.Object.(*s3v1alpha1.S3User) + return user != nil + }, + DeleteFunc: func(e event.DeleteEvent) bool { + // Evaluates to false if the object has been confirmed deleted. + return !e.DeleteStateUnknown + }, + }). + WithOptions(controller.Options{MaxConcurrentReconciles: 10}). + Complete(r) +} diff --git a/internal/controller/user/finalizer.go b/internal/controller/user/finalizer.go new file mode 100644 index 0000000..863859f --- /dev/null +++ b/internal/controller/user/finalizer.go @@ -0,0 +1,151 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user_controller + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *S3UserReconciler) finalizeS3User( + ctx context.Context, + userResource *s3v1alpha1.S3User, +) error { + logger := log.FromContext(ctx) + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + userResource.Name, + userResource.Namespace, + userResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return err + } + if s3Client.GetConfig().S3UserDeletionEnabled { + return s3Client.DeleteUser(userResource.Spec.AccessKey) + } + return nil +} + +func (r *S3UserReconciler) handleDeletion( + ctx context.Context, + req reconcile.Request, + userResource *s3v1alpha1.S3User, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + if controllerutil.ContainsFinalizer(userResource, userFinalizer) { + // Run finalization logic for S3UserFinalizer. If the finalization logic fails, don't remove the finalizer so that we can retry during the next reconciliation. + if err := r.finalizeS3User(ctx, userResource); err != nil { + logger.Error( + err, + "An error occurred when attempting to finalize the user", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.DeletionFailure, + "user deletion has failed", + err, + ) + } + + err := r.deleteOldLinkedSecret(ctx, userResource) + if err != nil { + logger.Error( + err, + "An error occurred when trying to clean old secret linked to user", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.DeletionFailure, + "Deletion of old secret associated to user have failed", + err, + ) + } + + userOwnedSecret, _ := r.getUserSecret(ctx, userResource) + if err := r.deleteSecret(ctx, &userOwnedSecret); err != nil { + logger.Error( + err, + "An error occurred when trying to clean secret linked to user", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.DeletionFailure, + "Deletion of secret associated to user have failed", + err, + ) + } + + //Remove userFinalizer. Once all finalizers have been removed, the object will be deleted. 
+ if ok := controllerutil.RemoveFinalizer(userResource, userFinalizer); !ok { + logger.Info( + "Failed to remove finalizer for user resource", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + // Unsure why the behavior is different to that of bucket/policy/path controllers, but it appears + // calling r.Update() for adding/removal of finalizer is not necessary (an update event is generated + // with the call to AddFinalizer/RemoveFinalizer), and worse, causes "freshness" problem (with the + // "the object has been modified; please apply your changes to the latest version and try again" error) + err = r.Update(ctx, userResource) + if err != nil { + logger.Error( + err, + "An error occurred when removing finalizer from policy", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + return ctrl.Result{}, nil +} diff --git a/internal/controller/user/finalizer_test.go b/internal/controller/user/finalizer_test.go new file mode 100644 index 0000000..9803fa4 --- /dev/null +++ b/internal/controller/user/finalizer_test.go @@ -0,0 +1,133 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +package user_controller_test + +import ( + "context" + "testing" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + user_controller "github.com/InseeFrLab/s3-operator/internal/controller/user" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleDelete(t *testing.T) { + + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + s3UserResource := &s3v1alpha1.S3User{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "s3.onyxia.sh/v1alpha1", + Kind: "S3User", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/userFinalizer"}, + UID: "6c8dceca-f7df-469d-80a5-1afed9e4d710", + }, + Spec: s3v1alpha1.S3UserSpec{ + AccessKey: "existing-valid-user", + Policies: []string{"admin"}, + SecretName: "existing-valid-user-credentials", + S3InstanceRef: "s3-operator/default", + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + + blockOwnerDeletion := true + controller := true + s3UserSecretResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user-credentials", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: s3UserResource.APIVersion, + Kind: s3UserResource.Kind, + Name: s3UserResource.Name, + BlockOwnerDeletion: 
&blockOwnerDeletion, + Controller: &controller, + UID: s3UserResource.UID, + }, + }, + }, + Data: map[string][]byte{ + "accessKey": []byte("existing-valid-user"), + "secretKey": []byte("validSecret"), + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3UserResource, s3UserSecretResource}) + + // Create the reconciler + reconciler := &user_controller.S3UserReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + testUtils.Client.Delete(context.TODO(), s3UserResource) + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("ressource have been deleted", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + s3UserResource := &s3v1alpha1.S3User{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "example-user", + }, s3UserResource) + assert.NotNil(t, err) + assert.ErrorContains(t, err, "s3users.s3.onyxia.sh \"example-user\" not found") + + s3UserSecret := &corev1.Secret{} + err = testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "existing-valid-user-credentials", + }, s3UserSecret) + assert.NotNil(t, err) + assert.ErrorContains(t, err, "secrets \"existing-valid-user-credentials\" not found") + + }) + +} diff --git a/internal/controller/user/reconcile.go b/internal/controller/user/reconcile.go new file mode 100644 index 0000000..e125805 --- /dev/null +++ b/internal/controller/user/reconcile.go @@ -0,0 +1,882 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user_controller + +import ( + "context" + "fmt" + "slices" + + corev1 "k8s.io/api/core/v1" + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +// Reconcile is part of the main kubernetes reconciliation loop which aims to +// move the current state of the cluster closer to the desired state. 
+// +// For more details, check Reconcile and its Result here: +// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.14.1/pkg/reconcile +func (r *S3UserReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + logger := log.FromContext(ctx) + + // Checking for userResource existence + userResource := &s3v1alpha1.S3User{} + err := r.Get(ctx, req.NamespacedName, userResource) + if err != nil { + if k8sapierrors.IsNotFound(err) { + logger.Info( + fmt.Sprintf( + "The S3User CR %s (or its owned Secret) has been removed. NOOP", + req.Name, + ), + ) + return ctrl.Result{}, nil + } + logger.Error(err, "An error occurred when fetching the S3User from Kubernetes") + return ctrl.Result{}, err + } + + // Let's just set the status as Unknown when no status are available + if len(userResource.Status.Conditions) == 0 { + meta.SetStatusCondition( + &userResource.Status.Conditions, + metav1.Condition{ + Type: s3v1alpha1.ConditionReconciled, + Status: metav1.ConditionUnknown, + ObservedGeneration: userResource.Generation, + Reason: s3v1alpha1.Reconciling, + Message: "Starting reconciliation", + }, + ) + if err = r.Status().Update(ctx, userResource); err != nil { + logger.Error( + err, + "Failed to update userRessource status", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the userResource Custom Resource after update the status + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, userResource); err != nil { + logger.Error( + err, + "Failed to re-fetch userResource", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Add finalizer for this CR + if !controllerutil.ContainsFinalizer(userResource, userFinalizer) { + logger.Info("Adding finalizer to user resource", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + + if ok := controllerutil.AddFinalizer(userResource, userFinalizer); !ok { + logger.Error( + err, + "Failed to add finalizer into user resource", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{Requeue: true}, nil + } + + if err = r.Update(ctx, userResource); err != nil { + logger.Error( + err, + "An error occurred when adding finalizer from user", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + + // Let's re-fetch the userResource Custom Resource after adding the finalizer + // so that we have the latest state of the resource on the cluster and we will avoid + // raise the issue "the object has been modified, please apply + // your changes to the latest version and try again" which would re-trigger the reconciliation + // if we try to update it again in the following operations + if err := r.Get(ctx, req.NamespacedName, userResource); err != nil { + logger.Error( + err, + "Failed to re-fetch userResource", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return ctrl.Result{}, err + } + } + + // Check if the userResource instance is 
marked to be deleted, which is + // indicated by the deletion timestamp being set. The object will be deleted. + if userResource.GetDeletionTimestamp() != nil { + logger.Info("userResource have been marked for deletion", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.handleDeletion(ctx, req, userResource) + } + + return r.handleReconciliation(ctx, req, userResource) + +} + +func (r *S3UserReconciler) handleReconciliation( + ctx context.Context, + req reconcile.Request, + userResource *s3v1alpha1.S3User, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + userResource.Name, + userResource.Namespace, + userResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + found, err := s3Client.UserExist(userResource.Spec.AccessKey) + if err != nil { + logger.Error( + err, + "An error occurred while checking the existence of a user", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Fail to check existence", + err, + ) + } + + if !found { + return r.handleCreate(ctx, req, userResource) + } + return r.handleUpdate(ctx, req, userResource) +} + +func (r *S3UserReconciler) handleUpdate( + ctx context.Context, + req reconcile.Request, + userResource *s3v1alpha1.S3User, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + // Create S3Client + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + userResource.Name, + userResource.Namespace, + userResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + err = r.deleteOldLinkedSecret(ctx, userResource) + if err != nil { + logger.Error( + err, + "An error occurred when trying to clean old secret linked to user", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Deletion of old secret associated to user have failed", + err, + ) + } + + userOwnedSecret, err := r.getUserSecret(ctx, userResource) + if err != nil { + if err.Error() == "SecretListingFailed" { + logger.Error( + err, + "An error occurred when trying to obtain the user's secret", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + + err = r.deleteSecret(ctx, &userOwnedSecret) + if err != nil { + logger.Error( + err, + "Deletion of secret associated to user have failed", + "userResource", + userResource.Name, + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Deletion of secret associated to user have failed", + err, + ) + + } + err = s3Client.DeleteUser(userResource.Spec.AccessKey) + if err != nil { + logger.Error(err, "Could not delete user on S3 server", "userResource", + userResource.Name, + 
"userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "Deletion of S3user %s on S3 server has failed", + userResource.Name, + ), + err, + ) + + } + return r.handleCreate(ctx, req, userResource) + } else if err.Error() == "S3UserSecretNameMismatch" { + logger.Info("A secret with owner reference to the user was found, but its name doesn't match the spec. This is probably due to the S3User's spec changing (specifically spec.secretName being added, changed or removed). The \"old\" secret will be deleted.", "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + err = r.deleteSecret(ctx, &userOwnedSecret) + if err != nil { + logger.Error(err, "Deletion of secret associated to user have failed", "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Deletion of secret associated to user have failed", + err, + ) + + } + } + } + + if userOwnedSecret.Name == "" { + logger.Info( + "Secret associated to user not found, user will be deleted from the S3 backend, then recreated with a secret", + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + + err = s3Client.DeleteUser(userResource.Spec.AccessKey) + if err != nil { + logger.Error(err, "Could not delete user on S3 server", "userResource", + userResource.Name, + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "Deletion of S3user %s on S3 server has failed", + userResource.Name, + ), + err, + ) + } + return r.handleCreate(ctx, req, userResource) + } + + logger.Info("Checking user policies", "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + userPolicies, err := s3Client.GetUserPolicies(userResource.Spec.AccessKey) + if err != nil { + logger.Error(err, "Could not check the user's policies", "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Checking the S3user policies has failed", + err, + ) + } + + policyToDelete := []string{} + policyToAdd := []string{} + for _, policy := range userPolicies { + policyFound := slices.Contains(userResource.Spec.Policies, policy) + if !policyFound { + logger.Info( + fmt.Sprintf("S3User policy definition doesn't contain policy %s", policy), + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + policyToDelete = append(policyToDelete, policy) + } + } + + for _, policy := range userResource.Spec.Policies { + policyFound := slices.Contains(userPolicies, policy) + if !policyFound { + logger.Info( + fmt.Sprintf("S3User policy definition must contain policy %s", policy), + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + policyToAdd = append(policyToAdd, policy) + } + } + + if len(policyToDelete) > 0 { + err = s3Client.RemovePoliciesFromUser(userResource.Spec.AccessKey, policyToDelete) + if err != nil { + logger.Error( + err, + "An error occurred while removing policy to user", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + 
return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Error while updating policies of user", + err, + ) + } + } + + if len(policyToAdd) > 0 { + err := s3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policyToAdd) + if err != nil { + logger.Error( + err, + "An error occurred while adding policy to user", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Error while updating policies of user", + err, + ) + + } + } + + credentialsValid, err := s3Client.CheckUserCredentialsValid( + userResource.Name, + string(userOwnedSecret.Data[userResource.Spec.SecretFieldNameAccessKey]), + string(userOwnedSecret.Data[userResource.Spec.SecretFieldNameSecretKey]), + ) + + if err != nil { + logger.Error( + err, + "An error occurred when checking if user credentials were valid", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Checking credentials on S3 server has failed", + err, + ) + } + + if !credentialsValid { + logger.Info( + "The secret containing the credentials will be deleted, and the user will be deleted from the S3 backend, then recreated (through another reconcile)", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + err = r.deleteSecret(ctx, &userOwnedSecret) + if err != nil { + logger.Error(err, "Deletion of secret associated to user have failed", "userResource", + userResource.Name, + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Deletion of secret associated to user have failed", + err, + ) + + } + err = s3Client.DeleteUser(userResource.Spec.AccessKey) + if err != nil { + logger.Error(err, "Could not delete user on S3 server", "userResource", + userResource.Name, + "userResourceName", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + fmt.Sprintf( + "Deletion of S3user %s on S3 server has failed", + userResource.Name, + ), + err, + ) + + } + return r.handleCreate(ctx, req, userResource) + } + + logger.Info("User was reconciled without error", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Reconciled, + "user reconciled", + err, + ) +} + +func (r *S3UserReconciler) handleCreate( + ctx context.Context, + req reconcile.Request, + userResource *s3v1alpha1.S3User, +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + // Create S3Client + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + userResource.Name, + userResource.Namespace, + userResource.Spec.S3InstanceRef, + ) + if err != nil { + logger.Error(err, "An error occurred while getting s3Client") + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Failed to generate s3client from instance", + err, + ) + } + + // Generating a random secret key + secretKey, err := r.PasswordGeneratorHelper.Generate(20, true, false, true) + if err != nil { + + logger.Error(err, fmt.Sprintf("Fail to generate password for user 
%s", userResource.Name), + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "An error occurred when attempting to generate password for user", + err, + ) + } + + // Create a new K8S Secret to hold the user's accessKey and secretKey + secret, err := r.newSecretForCR( + ctx, + userResource, + map[string][]byte{ + userResource.Spec.SecretFieldNameAccessKey: []byte(userResource.Spec.AccessKey), + userResource.Spec.SecretFieldNameSecretKey: []byte(secretKey)}, + ) + if err != nil { + // Error while creating the Kubernetes secret - requeue the request. + logger.Error(err, "Could not generate Kubernetes secret", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Generation of associated k8s Secret has failed", + err, + ) + } + + // For managing user creation, we first check if a Secret matching + // the user's spec (not matching the owner reference) exists + existingK8sSecret := &corev1.Secret{} + err = r.Get( + ctx, + types.NamespacedName{Name: secret.Name, Namespace: secret.Namespace}, + existingK8sSecret, + ) + + // If none exist : we create the user, then the secret + if err != nil && k8sapierrors.IsNotFound(err) { + logger.Info( + "No secret found ; creating a new Secret", + "Secret.Namespace", + secret.Namespace, + "Secret.Name", + secret.Name, + ) + + // Creating the user + err = s3Client.CreateUser(userResource.Spec.AccessKey, secretKey) + + if err != nil { + logger.Error( + err, + "An error occurred while creating user on S3 server", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Creation of user on S3 instance has failed", + err, + ) + } + + // Creating the secret + logger.Info( + "Creating a new secret to store the user's credentials", + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + err = r.Create(ctx, secret) + if err != nil { + logger.Error(err, "Could not create secret for user", + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Creation of secret for user has failed", + err, + ) + } + + // Add policies + err = r.addPoliciesToUser(ctx, userResource) + if err != nil { + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Error while updating policies of user on S3 instance", + err, + ) + } + + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Reconciled, + "User reconciled", + err, + ) + + } else if err != nil { + logger.Error(err, "Couldn't check secret existence", + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Fail to check if an existing secret already exist", + err, + ) + } else { + // If a secret already exists, but has a different S3User owner reference, then the creation should + // fail with no requeue, and use the status to inform that the spec should be changed + for _, ref := range 
existingK8sSecret.OwnerReferences { + if ref.Kind == "S3User" { + if ref.UID != userResource.UID { + logger.Error(fmt.Errorf(""), "The secret matching the new S3User's spec is owned by a different S3User.", + "conflictingUser", + ref.Name, + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.CreationFailure, + fmt.Sprintf("The secret matching the new S3User's spec is owned by a different, pre-existing S3User (%s). The S3User being created now (%s) won't be created on the S3 backend until its spec changes to target a different secret", ref.Name, userResource.Name), + err, + ) + } + } + } + + if r.OverrideExistingSecret { + // Case 3.2 : the credentials are not valid, but the operator is configured to overwrite the existing secret + logger.Info(fmt.Sprintf("A secret with the name %s already exists; it will be overwritten because of the operator configuration", secret.Name), "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + + // Creating the user + err = s3Client.CreateUser(userResource.Spec.AccessKey, secretKey) + if err != nil { + logger.Error( + err, + "An error occurred while creating user on S3 server", + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Creation of user on S3 instance has failed", + err, + ) + } + + // Updating the secret + logger.Info("Updating the pre-existing secret with new credentials", + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String(), + ) + err = r.Update(ctx, secret) + if err != nil { + logger.Error(err, "Could not update secret", "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Update of secret has failed", + err, + ) + } + + // Add policies + err = r.addPoliciesToUser(ctx, userResource) + if err != nil { + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Unreachable, + "Error while updating associated policy", + err, + ) + } + + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.Reconciled, + "User reconciled", + err, + ) + } + + // Case 3.3 : the credentials are not valid, and the operator is configured to keep the existing secret + // The user will not be created, with no requeue and with two possible ways out: either toggle + // OverrideExistingSecret on, or delete the S3User whose credentials are not working anyway. + logger.Error(fmt.Errorf(""), + "A secret with the same name already exists; as the operator is configured NOT to override pre-existing secrets, this user will not be created on the S3 backend until its spec changes (to target a new secret) or the operator configuration is changed to override existing secrets", + "secretName", + secret.Name, + "userResource", + userResource.Name, + "NamespacedName", + req.NamespacedName.String()) + return r.SetReconciledCondition( + ctx, + req, + userResource, + s3v1alpha1.CreationFailure, + "Creation of user on S3 instance has failed because the secret contains invalid credentials. 
The user's spec should be changed to target a different secret", + err, + ) + } +} diff --git a/internal/controller/user/reconcile_test.go b/internal/controller/user/reconcile_test.go new file mode 100644 index 0000000..9ba3c83 --- /dev/null +++ b/internal/controller/user/reconcile_test.go @@ -0,0 +1,302 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user_controller_test + +import ( + "context" + "testing" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + user_controller "github.com/InseeFrLab/s3-operator/internal/controller/user" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func TestHandleCreate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Create a fake client with a sample CR + s3UserResource := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-user", + Namespace: "default", + Generation: 1, + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + AccessKey: "example-user", + SecretName: "example-user-secret", + Policies: []string{"admin"}, + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + + // Create a fake client with a sample CR + s3UserUsingNotAllowedS3Instance := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "example-user", + Namespace: "unauthorized", + Generation: 1, + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + AccessKey: "example-user", + SecretName: "example-user-secret", + Policies: []string{"admin"}, + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3UserResource, s3UserUsingNotAllowedS3Instance}) + + // Create the reconciler + reconciler := &user_controller.S3UserReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("error if using invalidS3Instance", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserUsingNotAllowedS3Instance.Name, Namespace: 
s3UserUsingNotAllowedS3Instance.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NotNil(t, err) + }) + + t.Run("secret is created", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + + secretCreated := &corev1.Secret{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "example-user-secret", + }, secretCreated) + assert.NoError(t, err) + assert.Equal(t, "example-user", string(secretCreated.Data["accessKey"])) + assert.GreaterOrEqual(t, len(string(secretCreated.Data["secretKey"])), 20) + + }) +} + +func TestHandleUpdate(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + t.Run("valid user", func(t *testing.T) { + // Create a fake client with a sample CR + s3UserResource := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/userFinalizer"}, + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + AccessKey: "existing-valid-user", + SecretName: "existing-valid-user-secret", + Policies: []string{"admin"}, + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + + secretS3UserResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accessKey": []byte("existing-valid-user"), + "secretKey": []byte("validSecret"), + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3UserResource, secretS3UserResource}) + + // Create the reconciler + reconciler := &user_controller.S3UserReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + }) + + t.Run("invalid user password", func(t *testing.T) { + // Create a fake client with a sample CR + s3UserResource := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/userFinalizer"}, + UID: "6c8dceca-f7df-469d-80a5-1afed9e4d710", + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + AccessKey: "existing-valid-user", + SecretName: "existing-valid-user-secret", + Policies: []string{"admin"}, + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + blockOwnerDeletion := true + controller := true + secretS3UserResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user-secret", + Namespace: "default", + OwnerReferences: []metav1.OwnerReference{ + { + APIVersion: s3UserResource.APIVersion, + Kind: s3UserResource.Kind, + Name: s3UserResource.Name, + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &controller, + UID: s3UserResource.UID, + }, + 
}, + }, + Data: map[string][]byte{ + "accessKey": []byte("existing-valid-user"), + "secretKey": []byte("invalidSecret"), + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3UserResource, secretS3UserResource}) + + // Create the reconciler + reconciler := &user_controller.S3UserReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + + t.Run("secret have changed", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + reconciler.Reconcile(context.TODO(), req) + + secretCreated := &corev1.Secret{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "default", + Name: "existing-valid-user-secret", + }, secretCreated) + assert.NoError(t, err) + assert.Equal(t, "existing-valid-user", string(secretCreated.Data["accessKey"])) + assert.NotEqualValues(t, string(secretCreated.Data["secretKey"]), "invalidSecret") + }) + }) + + t.Run("invalid user policy", func(t *testing.T) { + // Create a fake client with a sample CR + s3UserResource := &s3v1alpha1.S3User{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user", + Namespace: "default", + Generation: 1, + Finalizers: []string{"s3.onyxia.sh/userFinalizer"}, + }, + Spec: s3v1alpha1.S3UserSpec{ + S3InstanceRef: "s3-operator/default", + AccessKey: "existing-valid-user", + SecretName: "existing-valid-user", + Policies: []string{"admin", "missing-policy"}, + SecretFieldNameAccessKey: "accessKey", + SecretFieldNameSecretKey: "secretKey", + }, + } + + secretS3UserResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "existing-valid-user-secret", + Namespace: "default", + }, + Data: map[string][]byte{ + "accessKey": []byte("existing-valid-user"), + "secretKey": []byte("validSecret"), + }, + } + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + s3instanceResource, secretResource := testUtils.GenerateBasicS3InstanceAndSecret() + testUtils.SetupClient([]client.Object{s3instanceResource, secretResource, s3UserResource, secretS3UserResource}) + + // Create the reconciler + reconciler := &user_controller.S3UserReconciler{ + Client: testUtils.Client, + Scheme: testUtils.Client.Scheme(), + S3factory: testUtils.S3Factory, + } + + t.Run("no error", func(t *testing.T) { + // Call Reconcile function + req := ctrl.Request{NamespacedName: types.NamespacedName{Name: s3UserResource.Name, Namespace: s3UserResource.Namespace}} + _, err := reconciler.Reconcile(context.TODO(), req) + assert.NoError(t, err) + }) + }) + +} diff --git a/internal/controller/user/status.go b/internal/controller/user/status.go new file mode 100644 index 0000000..990a159 --- /dev/null +++ b/internal/controller/user/status.go @@ -0,0 +1,47 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package user_controller + +import ( + "context" + + ctrl "sigs.k8s.io/controller-runtime" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *S3UserReconciler) SetReconciledCondition( + ctx context.Context, + req ctrl.Request, + userResource *s3v1alpha1.S3User, + reason string, + message string, + err error, +) (ctrl.Result, error) { + return r.ControllerHelper.SetReconciledCondition( + ctx, + r.Status(), + req, + userResource, + &userResource.Status.Conditions, + s3v1alpha1.ConditionReconciled, + reason, + message, + err, + r.ReconcilePeriod, + ) +} diff --git a/internal/controller/user/utils.go b/internal/controller/user/utils.go new file mode 100644 index 0000000..3968263 --- /dev/null +++ b/internal/controller/user/utils.go @@ -0,0 +1,186 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package user_controller + +import ( + "cmp" + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" +) + +func (r *S3UserReconciler) addPoliciesToUser( + ctx context.Context, + userResource *s3v1alpha1.S3User, +) error { + logger := log.FromContext(ctx) + // Create S3Client + s3Client, err := r.S3Instancehelper.GetS3ClientForRessource( + ctx, + r.Client, + r.S3factory, + userResource.Name, + userResource.Namespace, + userResource.Spec.S3InstanceRef, + ) + if err != nil { + return err + } + policies := userResource.Spec.Policies + if policies != nil { + err := s3Client.AddPoliciesToUser(userResource.Spec.AccessKey, policies) + if err != nil { + logger.Error( + err, + "An error occurred while adding policy to user", + "user", + userResource.Name, + ) + return err + } + } + return nil +} + +func (r *S3UserReconciler) deleteOldLinkedSecret(ctx context.Context, userResource *s3v1alpha1.S3User) error { + logger := log.FromContext(ctx) + secretsList := &corev1.SecretList{} + + // Define options with label selector and namespace + listOpts := []client.ListOption{ + client.InNamespace(userResource.Namespace), // Filter by namespace + client.MatchingLabels{"app.kubernetes.io/created-by": "s3-operator"}, // Filter by label + } + + // List Secrets with the specified label in the given namespace + if err := r.List(ctx, secretsList, listOpts...); err != nil { + return fmt.Errorf("failed to list secrets in namespace %s: %w", userResource.Namespace, err) + } + + for _, secret := range secretsList.Items { + for _, ref := range secret.OwnerReferences { + if ref.UID == userResource.GetUID() { + if (userResource.Spec.SecretName != "" && secret.Name != userResource.Spec.SecretName) || (userResource.Spec.SecretName == "" && secret.Name != userResource.Name) { + if err := r.deleteSecret(ctx, &secret); err != nil { + logger.Info("Failed to delete unused secret", "secret", secret.Name) + return fmt.Errorf("failed to delete unused secret %s, err %w", secret.Name, err) + } + } + } + } + } + + return nil +} + +func (r *S3UserReconciler) getUserSecret( + ctx context.Context, + userResource *s3v1alpha1.S3User, +) (corev1.Secret, error) { + userSecret := &corev1.Secret{} + secretName := userResource.Spec.SecretName + if secretName == "" { + secretName = userResource.Name + } + err := r.Get( + ctx, + types.NamespacedName{Namespace: userResource.Namespace, Name: secretName}, + userSecret, + ) + if err != nil { + if k8sapierrors.IsNotFound(err) { + return *userSecret, fmt.Errorf( + "secret %s not found in namespace %s", + secretName, + userResource.Namespace, + ) + } + return *userSecret, err + } + + for _, ref := range userSecret.OwnerReferences { + if ref.UID == userResource.GetUID() { + return *userSecret, nil + } + } + + return *userSecret, err +} + +func (r *S3UserReconciler) deleteSecret(ctx context.Context, secret *corev1.Secret) error { + logger := log.FromContext(ctx) + logger.Info("the secret named " + secret.Name + " will be deleted") + err := r.Delete(ctx, secret) + if err != nil { + logger.Error(err, "An error occurred while deleting a secret") + return err + } + return nil +} + +// newSecretForCR returns a secret with the same name/namespace as the CR. 
+// The secret will include all labels and annotations from the CR. +func (r *S3UserReconciler) newSecretForCR( + ctx context.Context, + userResource *s3v1alpha1.S3User, + data map[string][]byte, +) (*corev1.Secret, error) { + logger := log.FromContext(ctx) + + // Reusing the S3User's labels and annotations + labels := map[string]string{} + labels["app.kubernetes.io/created-by"] = "s3-operator" + for k, v := range userResource.ObjectMeta.Labels { + labels[k] = v + } + + annotations := map[string]string{} + for k, v := range userResource.ObjectMeta.Annotations { + annotations[k] = v + } + + secretName := cmp.Or(userResource.Spec.SecretName, userResource.Name) + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: userResource.Namespace, + Labels: labels, + Annotations: annotations, + }, + Data: data, + Type: "Opaque", + } + + // Set S3User instance as the owner and controller + err := ctrl.SetControllerReference(userResource, secret, r.Scheme) + if err != nil { + logger.Error(err, "Could not set owner of kubernetes secret") + return nil, err + } + + return secret, nil + +} diff --git a/internal/helpers/S3instance_test.go b/internal/helpers/S3instance_test.go new file mode 100644 index 0000000..678416a --- /dev/null +++ b/internal/helpers/S3instance_test.go @@ -0,0 +1,228 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package helpers_test + +import ( + "context" + "testing" + + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + helpers "github.com/InseeFrLab/s3-operator/internal/helpers" + TestUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" +) + +func TestGetS3ClientForRessource(t *testing.T) { + + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Register the custom resource with the scheme sch := runtime.NewScheme() + s := scheme.Scheme + s3v1alpha1.AddToScheme(s) + corev1.AddToScheme(s) + + s3Instance := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + }, + Status: s3v1alpha1.S3InstanceStatus{Conditions: []metav1.Condition{{Reason: s3v1alpha1.Reconciled}}}, + } + + s3Instance_not_ready := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "not-ready", + Namespace: "s3-operator", + }, + Status: s3v1alpha1.S3InstanceStatus{Conditions: []metav1.Condition{{Reason: s3v1alpha1.CreationFailure}}}, + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + client := fake.NewClientBuilder(). + WithScheme(s). + WithObjects(s3Instance, s3Instance_not_ready, secret). + WithStatusSubresource(s3Instance, s3Instance_not_ready, secret). 
+ Build() + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + + t.Run("no error", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + _, err := s3instanceHelper.GetS3ClientForRessource(context.TODO(), client, testUtils.S3Factory, "bucket-example", "default", "s3-operator/default") + assert.NoError(t, err) + }) + + t.Run("error because instance not ready", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + _, err := s3instanceHelper.GetS3ClientForRessource(context.TODO(), client, testUtils.S3Factory, "bucket-example", "default", "s3-operator/not-ready") + assert.Equal(t, "S3instance is not in a ready state", err.Error()) + }) +} + +func TestGetS3ClientFromS3instance(t *testing.T) { + // Set up a logger before running tests + log.SetLogger(zap.New(zap.UseDevMode(true))) + + // Register the custom resource with the scheme sch := runtime.NewScheme() + s := scheme.Scheme + s3v1alpha1.AddToScheme(s) + corev1.AddToScheme(s) + + s3Instance := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + }, + } + + secret := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + client := fake.NewClientBuilder(). + WithScheme(s). + WithObjects(s3Instance, secret). + WithStatusSubresource(s3Instance, secret). 
+ Build() + + // Add mock for s3Factory and client + testUtils := TestUtils.NewTestUtils() + testUtils.SetupMockedS3FactoryAndClient() + + t.Run("no error", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + _, err := s3instanceHelper.GetS3ClientFromS3Instance(context.TODO(), client, testUtils.S3Factory, s3Instance) + assert.NoError(t, err) + }) +} + +func TestGetS3InstanceRefInfo(t *testing.T) { + t.Run("Exact match", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.GetS3InstanceRefInfo("s3-operator/default", "default") + assert.Equal(t, true, result.Equal("default", "s3-operator")) + }) + + t.Run("Exact match", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.GetS3InstanceRefInfo("default", "default") + assert.Equal(t, true, result.Equal("default", "default")) + }) +} + +func TestIsAllowedNamespaces(t *testing.T) { + s3Instance := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + }, + } + + t.Run("Exact match", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.IsAllowedNamespaces("default", s3Instance) + assert.Equal(t, true, result) + }) + + t.Run("Wildcard prefix", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.IsAllowedNamespaces("test-namespace", s3Instance) + assert.Equal(t, true, result) + }) + + t.Run("Wildcard suffix", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.IsAllowedNamespaces("my-namespace", s3Instance) + assert.Equal(t, true, result) + }) + + t.Run("Wildcard contains", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.IsAllowedNamespaces("this-is-allowed-namespace", s3Instance) + assert.Equal(t, true, result) + }) + + t.Run("Not allowed", func(t *testing.T) { + s3instanceHelper := helpers.NewS3InstanceHelper() + result := s3instanceHelper.IsAllowedNamespaces("random", s3Instance) + assert.Equal(t, false, result) + }) +} diff --git a/internal/helpers/controller.go b/internal/helpers/controller.go new file mode 100644 index 0000000..a243ada --- /dev/null +++ b/internal/helpers/controller.go @@ -0,0 +1,90 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package helpers + +import ( + "context" + "fmt" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "k8s.io/apimachinery/pkg/api/meta" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +type ControllerHelper struct { +} + +func NewControllerHelper() *ControllerHelper { + return &ControllerHelper{} +} + +// SetReconciledCondition is a generic helper to update the reconciled condition for any Kubernetes resource. +func (c *ControllerHelper) SetReconciledCondition( + ctx context.Context, + statusWriter client.StatusWriter, // Allows updating status for any reconciler + req reconcile.Request, + resource client.Object, // Accepts any Kubernetes object with conditions + conditions *[]metav1.Condition, // Conditions field reference (must be a pointer) + conditionType string, // The type of condition to set + reason string, + message string, + err error, + requeueAfter time.Duration, // Requeue period for reconciliation +) (reconcile.Result, error) { + logger := log.FromContext(ctx) + + var changed bool + + if err != nil { + logger.Error(err, message, "NamespacedName", req.NamespacedName.String()) + changed = meta.SetStatusCondition( + conditions, + metav1.Condition{ + Type: conditionType, + Status: metav1.ConditionFalse, + ObservedGeneration: resource.GetGeneration(), + Reason: reason, + Message: fmt.Sprintf("%s: %s", message, err), + }, + ) + } else { + logger.Info(message, "NamespacedName", req.NamespacedName.String()) + changed = meta.SetStatusCondition( + conditions, + metav1.Condition{ + Type: conditionType, + Status: metav1.ConditionTrue, + ObservedGeneration: resource.GetGeneration(), + Reason: reason, + Message: message, + }, + ) + } + + if changed { + if errStatusUpdate := statusWriter.Update(ctx, resource); errStatusUpdate != nil { + logger.Error(errStatusUpdate, "Failed to update resource status", "ObjectKind", resource.GetObjectKind(), "NamespacedName", req.NamespacedName.String()) + return reconcile.Result{}, errStatusUpdate + } + } + + return reconcile.Result{RequeueAfter: requeueAfter}, err +} diff --git a/internal/helpers/controller_test.go b/internal/helpers/controller_test.go new file mode 100644 index 0000000..a71c4db --- /dev/null +++ b/internal/helpers/controller_test.go @@ -0,0 +1,119 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package helpers_test + +import ( + "context" + "fmt" + "testing" + "time" + + ctrl "sigs.k8s.io/controller-runtime" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + "github.com/InseeFrLab/s3-operator/internal/helpers" + testUtils "github.com/InseeFrLab/s3-operator/test/utils" + "github.com/stretchr/testify/assert" +) + +func TestSetReconciledCondition(t *testing.T) { + + log.SetLogger(zap.New(zap.UseDevMode(true))) + + s3instanceResource := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + Finalizers: []string{"s3.onyxia.sh/finalizer"}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + } + testUtils := testUtils.NewTestUtils() + testUtils.SetupClient([]client.Object{s3instanceResource}) + controllerHelper := helpers.NewControllerHelper() + + t.Run("no error", func(t *testing.T) { + _, err := controllerHelper.SetReconciledCondition( + context.TODO(), + testUtils.Client.Status(), + ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}}, + s3instanceResource, + &s3instanceResource.Status.Conditions, + s3v1alpha1.Reconciled, + "s3Instance reconciled", + "s3Instance reconciled", + nil, time.Duration(10), + ) + assert.NoError(t, err) + }) + + t.Run("resource status has changed", func(t *testing.T) { + s3instanceResourceUpdated := &s3v1alpha1.S3Instance{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "s3-operator", + Name: "default", + }, s3instanceResourceUpdated) + assert.NoError(t, err) + assert.Equal(t, s3v1alpha1.Reconciled, s3instanceResourceUpdated.Status.Conditions[0].Type) + assert.Equal(t, "s3Instance reconciled", s3instanceResourceUpdated.Status.Conditions[0].Message) + }) + + t.Run("with error", func(t *testing.T) { + _, err := controllerHelper.SetReconciledCondition( + context.TODO(), + testUtils.Client.Status(), + ctrl.Request{NamespacedName: types.NamespacedName{Name: s3instanceResource.Name, Namespace: s3instanceResource.Namespace}}, + s3instanceResource, + &s3instanceResource.Status.Conditions, + s3v1alpha1.CreationFailure, + "s3Instance reconciled", + "s3Instance reconciled", + fmt.Errorf("Something wrong has happened"), time.Duration(10), + ) + + assert.NotNil(t, err) + + }) + + t.Run("resource status has changed after error", func(t *testing.T) { + s3instanceResourceUpdated := &s3v1alpha1.S3Instance{} + err := testUtils.Client.Get(context.TODO(), client.ObjectKey{ + Namespace: "s3-operator", + Name: "default", + }, s3instanceResourceUpdated) + assert.NoError(t, err) + assert.Equal(t, s3v1alpha1.CreationFailure, s3instanceResourceUpdated.Status.Conditions[1].Type) + assert.Contains(t, s3instanceResourceUpdated.Status.Conditions[1].Message, "Something wrong has happened") + }) +} diff --git a/controllers/utils/password/password_generator.go b/internal/helpers/password_generator.go similarity index 78% rename from 
controllers/utils/password/password_generator.go rename to internal/helpers/password_generator.go index 884ccef..b46adcc 100644 --- a/controllers/utils/password/password_generator.go +++ b/internal/helpers/password_generator.go @@ -1,4 +1,20 @@ -package password +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers import ( "crypto/rand" @@ -7,8 +23,11 @@ import ( "strings" ) -type PasswordGenerator interface { - Generate(int, int, int, bool, bool) (string, error) +type PasswordGenerator struct { +} + +func NewPasswordGenerator() *PasswordGenerator { + return &PasswordGenerator{} } const ( @@ -26,7 +45,7 @@ const ( ) // func GeneratePassword(length int, useLetters bool, useSpecial bool, useNum bool) string { -func Generate(length int, useLetters bool, useSpecial bool, useNum bool) (string, error) { +func (p *PasswordGenerator) Generate(length int, useLetters bool, useSpecial bool, useNum bool) (string, error) { gen, err := NewGenerator(nil) if err != nil { return "", err @@ -42,7 +61,14 @@ func Generate(length int, useLetters bool, useSpecial bool, useNum bool) (string // // The algorithm is fast, but it's not designed to be performant; it favors // entropy over speed. This function is safe for concurrent use. -func (g *Generator) Generate(length int, useDigit bool, useSymbol bool, useUpper bool, useLower bool, allowRepeat bool) (string, error) { +func (g *Generator) Generate( + length int, + useDigit bool, + useSymbol bool, + useUpper bool, + useLower bool, + allowRepeat bool, +) (string, error) { choices := "" if useDigit { diff --git a/internal/helpers/password_generator_test.go b/internal/helpers/password_generator_test.go new file mode 100644 index 0000000..4e27592 --- /dev/null +++ b/internal/helpers/password_generator_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers_test + +import ( + "testing" + + helpers "github.com/InseeFrLab/s3-operator/internal/helpers" + "github.com/stretchr/testify/assert" +) + +func TestGenerate(t *testing.T) { + t.Run("Exact match", func(t *testing.T) { + passwordGenerator := helpers.NewPasswordGenerator() + password, _ := passwordGenerator.Generate(20, true, true, true) + assert.Len(t, password, 20) + }) +} diff --git a/internal/helpers/s3instance.go b/internal/helpers/s3instance.go new file mode 100644 index 0000000..31e83c1 --- /dev/null +++ b/internal/helpers/s3instance.go @@ -0,0 +1,251 @@ +/* +Copyright 2023. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helpers + +import ( + "context" + "fmt" + "strings" + + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" + corev1 "k8s.io/api/core/v1" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + k8sapierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +func (s3InstanceHelper *S3InstanceHelper) GetS3ClientForRessource( + ctx context.Context, + client client.Client, + s3factory s3factory.S3Factory, + ressourceName string, + ressourceNamespace string, + ressourceS3InstanceRef string, +) (s3client.S3Client, error) { + logger := log.FromContext(ctx) + logger.Info(fmt.Sprintf("Resource refer to s3Instance %s", ressourceS3InstanceRef)) + s3InstanceInfo := s3InstanceHelper.GetS3InstanceRefInfo(ressourceS3InstanceRef, ressourceNamespace) + s3Instance := &s3v1alpha1.S3Instance{} + err := client.Get( + ctx, + types.NamespacedName{Namespace: s3InstanceInfo.namespace, Name: s3InstanceInfo.name}, + s3Instance, + ) + + if err != nil { + if k8sapierrors.IsNotFound(err) { + return nil, fmt.Errorf("S3Instance %s not found", s3InstanceInfo.name) + } + return nil, err + } + + if !s3InstanceHelper.IsAllowedNamespaces(ressourceNamespace, s3Instance) { + logger.Info( + fmt.Sprintf( + "Resource %s try to use s3instance %s in namespace %s but is not allowed", + ressourceName, + s3InstanceInfo.name, + s3InstanceInfo.namespace, + ), + ) + return nil, fmt.Errorf("S3Instance %s not found", s3InstanceInfo.name) + } + + if s3Instance.Status.Conditions[0].Reason != s3v1alpha1.Reconciled { + return nil, fmt.Errorf("S3instance is not in a ready state") + } + + return s3InstanceHelper.GetS3ClientFromS3Instance(ctx, client, s3factory, s3Instance) +} + +func (s3InstanceHelper *S3InstanceHelper) GetS3ClientFromS3Instance( + ctx context.Context, + client client.Client, + s3factory s3factory.S3Factory, + s3InstanceResource *s3v1alpha1.S3Instance, +) (s3client.S3Client, error) { + logger := log.FromContext(ctx) + + s3InstanceSecretSecret, err := s3InstanceHelper.getS3InstanceAccessSecret(ctx, client, s3InstanceResource) + if err != nil { + logger.Error( + err, + "Could not get s3Instance auth secret in namespace", + "s3InstanceSecretRefName", + s3InstanceResource.Spec.SecretRef, + "NamespacedName", + s3InstanceResource.Namespace, + ) + return nil, err + } + + s3InstanceCaCertSecret, err := s3InstanceHelper.getS3InstanceCaCertSecret(ctx, client, s3InstanceResource) + if err != nil { + logger.Error( + err, + "Could not get s3Instance cert secret in namespace", + "s3InstanceSecretRefName", + s3InstanceResource.Spec.SecretRef, + "NamespacedName", + s3InstanceResource.Namespace, + ) + return nil, err + } + + allowedNamepaces := []string{s3InstanceResource.Namespace} + if len(s3InstanceResource.Spec.AllowedNamespaces) > 0 { + allowedNamepaces = s3InstanceResource.Spec.AllowedNamespaces + } + 
+ s3Config := &s3client.S3Config{ + S3Provider: s3InstanceResource.Spec.S3Provider, + AccessKey: string(s3InstanceSecretSecret.Data["S3_ACCESS_KEY"]), + SecretKey: string(s3InstanceSecretSecret.Data["S3_SECRET_KEY"]), + S3Url: s3InstanceResource.Spec.Url, + Region: s3InstanceResource.Spec.Region, + AllowedNamespaces: allowedNamepaces, + CaCertificatesBase64: []string{string(s3InstanceCaCertSecret.Data["ca.crt"])}, + BucketDeletionEnabled: s3InstanceResource.Spec.BucketDeletionEnabled, + S3UserDeletionEnabled: s3InstanceResource.Spec.S3UserDeletionEnabled, + PolicyDeletionEnabled: s3InstanceResource.Spec.PolicyDeletionEnabled, + PathDeletionEnabled: s3InstanceResource.Spec.PathDeletionEnabled, + } + + return s3factory.GenerateS3Client(s3Config.S3Provider, s3Config) +} + +func (s3InstanceHelper *S3InstanceHelper) getS3InstanceAccessSecret( + ctx context.Context, + client client.Client, + s3InstanceResource *s3v1alpha1.S3Instance, +) (corev1.Secret, error) { + s3InstanceSecret := &corev1.Secret{} + err := client.Get( + ctx, + types.NamespacedName{ + Namespace: s3InstanceResource.Namespace, + Name: s3InstanceResource.Spec.SecretRef, + }, + s3InstanceSecret, + ) + if err != nil { + if k8sapierrors.IsNotFound(err) { + return *s3InstanceSecret, fmt.Errorf( + "secret %s not found in namespace %s", + s3InstanceResource.Spec.SecretRef, + s3InstanceResource.Namespace, + ) + } + return *s3InstanceSecret, err + } + return *s3InstanceSecret, nil +} + +func (s3InstanceHelper *S3InstanceHelper) getS3InstanceCaCertSecret( + ctx context.Context, + client client.Client, + s3InstanceResource *s3v1alpha1.S3Instance, +) (corev1.Secret, error) { + logger := log.FromContext(ctx) + + s3InstanceCaCertSecret := &corev1.Secret{} + + if s3InstanceResource.Spec.CaCertSecretRef == "" { + logger.Info(fmt.Sprintf("No CaCertSecretRef for s3instance %s", s3InstanceResource.Name)) + return *s3InstanceCaCertSecret, nil + } + + err := client.Get( + ctx, + types.NamespacedName{ + Namespace: s3InstanceResource.Namespace, + Name: s3InstanceResource.Spec.CaCertSecretRef, + }, + s3InstanceCaCertSecret, + ) + if err != nil { + if k8sapierrors.IsNotFound(err) { + logger.Info( + "No Secret %s for s3instance %s", + s3InstanceResource.Spec.CaCertSecretRef, + s3InstanceResource.Name, + ) + return *s3InstanceCaCertSecret, fmt.Errorf( + "secret %s not found in namespace %s", + s3InstanceResource.Spec.CaCertSecretRef, + s3InstanceResource.Namespace, + ) + } + return *s3InstanceCaCertSecret, err + } + return *s3InstanceCaCertSecret, nil +} + +func (s3InstanceHelper *S3InstanceHelper) GetS3InstanceRefInfo(ressourceS3InstanceRef string, ressourceNamespace string) S3InstanceInfo { + if strings.Contains(ressourceS3InstanceRef, "/") { + result := strings.Split(ressourceS3InstanceRef, "/") + return S3InstanceInfo{name: result[1], namespace: result[0]} + } + return S3InstanceInfo{name: ressourceS3InstanceRef, namespace: ressourceNamespace} +} + +func (s3InstanceHelper *S3InstanceHelper) IsAllowedNamespaces(namespace string, s3Instance *s3v1alpha1.S3Instance) bool { + if len(s3Instance.Spec.AllowedNamespaces) > 0 { + for _, allowedNamespace := range s3Instance.Spec.AllowedNamespaces { + if strings.HasPrefix(allowedNamespace, "*") && + strings.HasSuffix(allowedNamespace, "*") && strings.Contains( + namespace, + strings.TrimSuffix(strings.TrimPrefix(allowedNamespace, "*"), "*"), + ) { + return true + } else if strings.HasPrefix(allowedNamespace, "*") && strings.HasSuffix(namespace, strings.TrimPrefix(allowedNamespace, "*")) { + return true + } else if 
strings.HasSuffix(allowedNamespace, "*") && strings.HasPrefix(namespace, strings.TrimSuffix(allowedNamespace, "*")) { + return true + } else if namespace == allowedNamespace { + return true + } + } + return false + } else { + return namespace == s3Instance.Namespace + } +} + +type S3InstanceInfo struct { + name string + namespace string +} + +func (s3InstanceInfo S3InstanceInfo) String() string { + return fmt.Sprintf("%s/%s", s3InstanceInfo.namespace, s3InstanceInfo.name) +} + +func (s3InstanceInfo S3InstanceInfo) Equal(s3InstanceInfoName string, s3InstanceInfoNamespace string) bool { + return s3InstanceInfo.name == s3InstanceInfoName && s3InstanceInfo.namespace == s3InstanceInfoNamespace +} + +type S3InstanceHelper struct { +} + +func NewS3InstanceHelper() *S3InstanceHelper { + return &S3InstanceHelper{} +} diff --git a/controllers/s3/factory/minioS3Client.go b/internal/s3/client/impl/minioS3Client.go similarity index 56% rename from controllers/s3/factory/minioS3Client.go rename to internal/s3/client/impl/minioS3Client.go index fb307d7..c340fbd 100644 --- a/controllers/s3/factory/minioS3Client.go +++ b/internal/s3/client/impl/minioS3Client.go @@ -1,107 +1,177 @@ -package factory +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3clientimpl import ( "bytes" "context" "crypto/tls" "crypto/x509" - "encoding/base64" + "fmt" "net/http" - "os" + neturl "net/url" "strings" + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" "github.com/minio/madmin-go/v3" "github.com/minio/minio-go/v7" "github.com/minio/minio-go/v7/pkg/credentials" + ctrl "sigs.k8s.io/controller-runtime" ) type MinioS3Client struct { - s3Config S3Config + s3Config s3client.S3Config client minio.Client adminClient madmin.AdminClient } -func newMinioS3Client(S3Config *S3Config) *MinioS3Client { +func NewMinioS3Client(S3Config *s3client.S3Config) (*MinioS3Client, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("creating minio clients (regular and admin)") + minioClient, err := generateMinioClient( + S3Config.S3Url, + S3Config.AccessKey, + S3Config.SecretKey, + S3Config.Region, + S3Config.CaCertificatesBase64, + ) + if err != nil { + s3Logger.Error(err, "an error occurred while creating a new minio client") + return nil, err + } + adminClient, err := generateAdminMinioClient( + S3Config.S3Url, + S3Config.AccessKey, + S3Config.SecretKey, + S3Config.CaCertificatesBase64, + ) + if err != nil { + s3Logger.Error(err, "an error occurred while creating a new minio admin client") + return nil, err + } + return &MinioS3Client{*S3Config, *minioClient, *adminClient}, nil +} + +func generateMinioClient( + url string, + accessKey string, + secretKey string, + region string, + caCertificates []string, +) (*minio.Client, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") + hostname, isSSL, err := extractHostAndScheme(url) + if err != nil { + s3Logger.Error(err, "an error occurred while creating a new minio client") + return nil, err + } minioOptions := 
&minio.Options{ - Creds: credentials.NewStaticV4(S3Config.AccessKey, S3Config.SecretKey, ""), - Region: S3Config.Region, - Secure: S3Config.UseSsl, - } - - // Preparing the tlsConfig to support custom CA if configured - // See also : - // - https://pkg.go.dev/github.com/minio/minio-go/v7@v7.0.52#Options - // - https://pkg.go.dev/net/http#RoundTripper - // - https://youngkin.github.io/post/gohttpsclientserver/#create-the-client - // - https://forfuncsake.github.io/post/2017/08/trust-extra-ca-cert-in-go-app/ - // Appending content directly, from a base64-encoded, PEM format CA certificate - // Variant : if S3Config.CaBundlePath was a string[] - // for _, caCertificateFilePath := range S3Config.S3Config.CaBundlePaths { - // caCert, err := os.ReadFile(caCertificateFilePath) - // if err != nil { - // log.Fatalf("Error opening CA cert file %s, Error: %s", caCertificateFilePath, err) - // } - // rootCAs.AppendCertsFromPEM([]byte(caCert)) - // } - addTransportOptions(S3Config, minioOptions) - - minioClient, err := minio.New(S3Config.S3UrlEndpoint, minioOptions) + Creds: credentials.NewStaticV4(accessKey, secretKey, ""), + Region: region, + Secure: isSSL, + } + + if len(caCertificates) > 0 { + addTlsClientConfigToMinioOptions(caCertificates, minioOptions) + } + + minioClient, err := minio.New(hostname, minioOptions) if err != nil { s3Logger.Error(err, "an error occurred while creating a new minio client") + return nil, err + } + return minioClient, nil +} + +func generateAdminMinioClient( + url string, + accessKey string, + secretKey string, + caCertificates []string, +) (*madmin.AdminClient, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") + hostname, isSSL, err := extractHostAndScheme(url) + if err != nil { + s3Logger.Error(err, "an error occurred while creating a new minio admin client") + return nil, err + } + + minioOptions := &madmin.Options{ + Creds: credentials.NewStaticV4(accessKey, secretKey, ""), + Secure: isSSL, + } + + if len(caCertificates) > 0 { + addTlsClientConfigToMinioAdminOptions(caCertificates, minioOptions) } - adminClient, err := madmin.New(S3Config.S3UrlEndpoint, S3Config.AccessKey, S3Config.SecretKey, S3Config.UseSsl) + minioAdminClient, err := madmin.NewWithOptions(hostname, minioOptions) if err != nil { s3Logger.Error(err, "an error occurred while creating a new minio admin client") + return nil, err } - // Getting the custom root CA (if any) from the "regular" client's Transport - adminClient.SetCustomTransport(minioOptions.Transport) - return &MinioS3Client{*S3Config, *minioClient, *adminClient} + return minioAdminClient, nil } -func addTransportOptions(S3Config *S3Config, minioOptions *minio.Options) { - if len(S3Config.CaCertificatesBase64) > 0 { +func extractHostAndScheme(url string) (string, bool, error) { + parsedURL, err := neturl.Parse(url) + if err != nil { + return "", false, fmt.Errorf("cannot detect if url use ssl or not") + } + return parsedURL.Hostname(), parsedURL.Scheme == "https", nil +} - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } +func addTlsClientConfigToMinioOptions(caCertificates []string, minioOptions *minio.Options) { + rootCAs, _ := x509.SystemCertPool() + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } - for _, caCertificateBase64 := range S3Config.CaCertificatesBase64 { - decodedCaCertificate, err := base64.StdEncoding.DecodeString(caCertificateBase64) - if err != nil { - s3Logger.Error(err, "an error occurred while parsing a base64-encoded CA certificate") - } + 
for _, caCertificate := range caCertificates { + rootCAs.AppendCertsFromPEM([]byte(caCertificate)) + } - rootCAs.AppendCertsFromPEM(decodedCaCertificate) - } + minioOptions.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: rootCAs, + }, + } +} - minioOptions.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: rootCAs, - }, - } - } else if len(S3Config.CaBundlePath) > 0 { +func addTlsClientConfigToMinioAdminOptions(caCertificates []string, minioOptions *madmin.Options) { + rootCAs, _ := x509.SystemCertPool() + if rootCAs == nil { + rootCAs = x509.NewCertPool() + } - rootCAs, _ := x509.SystemCertPool() - if rootCAs == nil { - rootCAs = x509.NewCertPool() - } + for _, caCertificate := range caCertificates { + // caCertificateAsByte := []byte(caCertificate) + // caCertificateEncoded := base64.StdEncoding.EncodeToString(caCertificateAsByte) + // rootCAs.AppendCertsFromPEM([]byte(caCertificateEncoded)) + rootCAs.AppendCertsFromPEM([]byte(caCertificate)) - caCert, err := os.ReadFile(S3Config.CaBundlePath) - if err != nil { - s3Logger.Error(err, "an error occurred while reading a CA certificates bundle file") - } - rootCAs.AppendCertsFromPEM([]byte(caCert)) + } - minioOptions.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: rootCAs, - }, - } + minioOptions.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: rootCAs, + }, } } @@ -109,33 +179,72 @@ func addTransportOptions(S3Config *S3Config, minioOptions *minio.Options) { // Bucket methods // // ////////////////// func (minioS3Client *MinioS3Client) BucketExists(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("checking bucket existence", "bucket", name) return minioS3Client.client.BucketExists(context.Background(), name) } func (minioS3Client *MinioS3Client) CreateBucket(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("creating bucket", "bucket", name) - return minioS3Client.client.MakeBucket(context.Background(), name, minio.MakeBucketOptions{Region: minioS3Client.s3Config.Region}) + return minioS3Client.client.MakeBucket( + context.Background(), + name, + minio.MakeBucketOptions{Region: minioS3Client.s3Config.Region}, + ) +} + +func (minioS3Client *MinioS3Client) ListBuckets() ([]string, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") + s3Logger.Info("listing bucket") + listBucketsInfo, err := minioS3Client.client.ListBuckets(context.Background()) + bucketsName := []string{} + if err != nil { + errAsResponse := minio.ToErrorResponse(err) + s3Logger.Error(err, "an error occurred while listing buckets", "code", errAsResponse.Code) + return bucketsName, err + } + for _, bucketInfo := range listBucketsInfo { + bucketsName = append(bucketsName, bucketInfo.Name) + } + return bucketsName, nil } // Will fail if bucket is not empty func (minioS3Client *MinioS3Client) DeleteBucket(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("deleting bucket", "bucket", name) return minioS3Client.client.RemoveBucket(context.Background(), name) } func (minioS3Client *MinioS3Client) CreatePath(bucketname string, path string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("creating a path on a bucket", "bucket", bucketname, "path", path) emptyReader := bytes.NewReader([]byte("")) - _, err := minioS3Client.client.PutObject(context.Background(), bucketname, 
"/"+path+"/"+".keep", emptyReader, 0, minio.PutObjectOptions{}) + _, err := minioS3Client.client.PutObject( + context.Background(), + bucketname, + "/"+path+"/"+".keep", + emptyReader, + 0, + minio.PutObjectOptions{}, + ) if err != nil { - s3Logger.Error(err, "an error occurred during path creation on bucket", "bucket", bucketname, "path", path) + s3Logger.Error( + err, + "an error occurred during path creation on bucket", + "bucket", + bucketname, + "path", + path, + ) return err } return nil } func (minioS3Client *MinioS3Client) PathExists(bucketname string, path string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("checking path existence on a bucket", "bucket", bucketname, "path", path) _, err := minioS3Client.client. StatObject(context.Background(), @@ -158,10 +267,23 @@ func (minioS3Client *MinioS3Client) PathExists(bucketname string, path string) ( } func (minioS3Client *MinioS3Client) DeletePath(bucketname string, path string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("deleting a path on a bucket", "bucket", bucketname, "path", path) - err := minioS3Client.client.RemoveObject(context.Background(), bucketname, "/"+path+"/.keep", minio.RemoveObjectOptions{}) + err := minioS3Client.client.RemoveObject( + context.Background(), + bucketname, + "/"+path+"/.keep", + minio.RemoveObjectOptions{}, + ) if err != nil { - s3Logger.Error(err, "an error occurred during path deletion on bucket", "bucket", bucketname, "path", path) + s3Logger.Error( + err, + "an error occurred during path deletion on bucket", + "bucket", + bucketname, + "path", + path, + ) return err } return nil @@ -171,6 +293,7 @@ func (minioS3Client *MinioS3Client) DeletePath(bucketname string, path string) e // Quota methods // // ///////////////// func (minioS3Client *MinioS3Client) GetQuota(name string) (int64, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("getting quota on bucket", "bucket", name) bucketQuota, err := minioS3Client.adminClient.GetBucketQuota(context.Background(), name) if err != nil { @@ -180,8 +303,13 @@ func (minioS3Client *MinioS3Client) GetQuota(name string) (int64, error) { } func (minioS3Client *MinioS3Client) SetQuota(name string, quota int64) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("setting quota on bucket", "bucket", name, "quotaToSet", quota) - minioS3Client.adminClient.SetBucketQuota(context.Background(), name, &madmin.BucketQuota{Quota: uint64(quota), Type: madmin.HardQuota}) + minioS3Client.adminClient.SetBucketQuota( + context.Background(), + name, + &madmin.BucketQuota{Quota: uint64(quota), Type: madmin.HardQuota}, + ) return nil } @@ -202,6 +330,7 @@ func (minioS3Client *MinioS3Client) SetQuota(name string, quota int64) error { // A consequence is that we do things a little differently compared to buckets - instead of just testing for // existence, we get the whole policy info, and the controller uses it down the line. 
func (minioS3Client *MinioS3Client) GetPolicyInfo(name string) (*madmin.PolicyInfo, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("retrieving policy info", "policy", name) policy, err := minioS3Client.adminClient.InfoCannedPolicyV2(context.Background(), name) @@ -225,11 +354,13 @@ func (minioS3Client *MinioS3Client) GetPolicyInfo(name string) (*madmin.PolicyIn // The AddCannedPolicy of the madmin client actually does both creation and update (so does the CLI, as both // are wired to the same endpoint on Minio API server). func (minioS3Client *MinioS3Client) CreateOrUpdatePolicy(name string, content string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("create or update policy", "policy", name) return minioS3Client.adminClient.AddCannedPolicy(context.Background(), name, []byte(content)) } func (minioS3Client *MinioS3Client) PolicyExist(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("checking policy existence", "policy", name) policies, err := minioS3Client.adminClient.ListPolicies(context.Background(), name) if err != nil { @@ -245,6 +376,7 @@ func (minioS3Client *MinioS3Client) PolicyExist(name string) (bool, error) { } func (minioS3Client *MinioS3Client) DeletePolicy(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("delete policy", "policy", name) return minioS3Client.adminClient.RemoveCannedPolicy(context.Background(), name) } @@ -254,6 +386,7 @@ func (minioS3Client *MinioS3Client) DeletePolicy(name string) error { //////////////////// func (minioS3Client *MinioS3Client) CreateUser(accessKey string, secretKey string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Creating user", "accessKey", accessKey) err := minioS3Client.adminClient.AddUser(context.Background(), accessKey, secretKey) if err != nil { @@ -263,7 +396,12 @@ func (minioS3Client *MinioS3Client) CreateUser(accessKey string, secretKey strin return nil } -func (minioS3Client *MinioS3Client) AddServiceAccountForUser(name string, accessKey string, secretKey string) error { +func (minioS3Client *MinioS3Client) AddServiceAccountForUser( + name string, + accessKey string, + secretKey string, +) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Adding service account for user", "user", name, "accessKey", accessKey) opts := madmin.AddServiceAccountReq{ @@ -285,6 +423,7 @@ func (minioS3Client *MinioS3Client) AddServiceAccountForUser(name string, access } func (minioS3Client *MinioS3Client) UserExist(accessKey string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("checking user existence", "accessKey", accessKey) _, _err := minioS3Client.adminClient.GetUserInfo(context.Background(), accessKey) if _err != nil { @@ -299,6 +438,7 @@ func (minioS3Client *MinioS3Client) UserExist(accessKey string) (bool, error) { } func (minioS3Client *MinioS3Client) DeleteUser(accessKey string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("delete user with accessKey", "accessKey", accessKey) err := minioS3Client.adminClient.RemoveUser(context.Background(), accessKey) if err != nil { @@ -313,6 +453,7 @@ func (minioS3Client *MinioS3Client) DeleteUser(accessKey string) error { } func (minioS3Client *MinioS3Client) GetUserPolicies(accessKey string) ([]string, error) { + s3Logger := 
ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Get user policies", "accessKey", accessKey) userInfo, err := minioS3Client.adminClient.GetUserInfo(context.Background(), accessKey) if err != nil { @@ -323,24 +464,36 @@ func (minioS3Client *MinioS3Client) GetUserPolicies(accessKey string) ([]string, return strings.Split(userInfo.PolicyName, ","), nil } -func (minioS3Client *MinioS3Client) CheckUserCredentialsValid(name string, accessKey string, secretKey string) (bool, error) { +func (minioS3Client *MinioS3Client) CheckUserCredentialsValid( + name string, + accessKey string, + secretKey string, +) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Check credentials for user", "user", name, "accessKey", accessKey) - minioTestClientOptions := &minio.Options{ - Creds: credentials.NewStaticV4(accessKey, secretKey, ""), - Region: minioS3Client.s3Config.Region, - Secure: minioS3Client.s3Config.UseSsl, - } - addTransportOptions(&minioS3Client.s3Config, minioTestClientOptions) - minioTestClient, err := minio.New(minioS3Client.s3Config.S3UrlEndpoint, minioTestClientOptions) + + minioTestClient, err := generateMinioClient( + minioS3Client.s3Config.S3Url, + accessKey, + secretKey, + minioS3Client.s3Config.Region, + minioS3Client.s3Config.CaCertificatesBase64, + ) if err != nil { s3Logger.Error(err, "An error occurred while creating a new Minio test client") + return false, err } - _, err = minioTestClient.ListBuckets(context.Background()) if err != nil { errAsResponse := minio.ToErrorResponse(err) if errAsResponse.Code == "SignatureDoesNotMatch" { - s3Logger.Info("the user credentials appear to be invalid", "accessKey", accessKey, "s3BackendError", errAsResponse) + s3Logger.Info( + "the user credentials appear to be invalid", + "accessKey", + accessKey, + "s3BackendError", + errAsResponse, + ) return false, nil } else if errAsResponse.Code == "InvalidAccessKeyId" { s3Logger.Info("this accessKey does not exist on the s3 backend", "accessKey", accessKey, "s3BackendError", errAsResponse) @@ -353,7 +506,11 @@ func (minioS3Client *MinioS3Client) CheckUserCredentialsValid(name string, acces return true, nil } -func (minioS3Client *MinioS3Client) RemovePoliciesFromUser(accessKey string, policies []string) error { +func (minioS3Client *MinioS3Client) RemovePoliciesFromUser( + accessKey string, + policies []string, +) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Removing policies from user", "user", accessKey, "policies", policies) opts := madmin.PolicyAssociationReq{ @@ -369,7 +526,12 @@ func (minioS3Client *MinioS3Client) RemovePoliciesFromUser(accessKey string, pol s3Logger.Info("The policy change has no net effect") return nil } - s3Logger.Error(err, "an error occurred when attaching a policy to the user", "code", errAsResp.Code) + s3Logger.Error( + err, + "an error occurred when attaching a policy to the user", + "code", + errAsResp.Code, + ) return err } @@ -377,6 +539,7 @@ func (minioS3Client *MinioS3Client) RemovePoliciesFromUser(accessKey string, pol } func (minioS3Client *MinioS3Client) AddPoliciesToUser(accessKey string, policies []string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3clientimplminio") s3Logger.Info("Adding policies to user", "user", accessKey, "policies", policies) opts := madmin.PolicyAssociationReq{ User: accessKey, @@ -389,8 +552,17 @@ func (minioS3Client *MinioS3Client) AddPoliciesToUser(accessKey string, policies s3Logger.Info("The policy change has no net effect") 
return nil } - s3Logger.Error(err, "an error occurred when attaching a policy to the user", "code", errAsResp.Code) + s3Logger.Error( + err, + "an error occurred when attaching a policy to the user", + "code", + errAsResp.Code, + ) return err } return nil } + +func (minioS3Client *MinioS3Client) GetConfig() *s3client.S3Config { + return &minioS3Client.s3Config +} diff --git a/controllers/s3/factory/mockedS3Client.go b/internal/s3/client/impl/mockedS3Client.go similarity index 59% rename from controllers/s3/factory/mockedS3Client.go rename to internal/s3/client/impl/mockedS3Client.go index 96770fb..ea79403 100644 --- a/controllers/s3/factory/mockedS3Client.go +++ b/internal/s3/client/impl/mockedS3Client.go @@ -1,113 +1,159 @@ -package factory +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3clientimpl import ( + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" "github.com/minio/madmin-go/v3" + ctrl "sigs.k8s.io/controller-runtime" ) -type MockedS3Client struct{} +type MockedS3Client struct { + s3Config s3client.S3Config +} func (mockedS3Provider *MockedS3Client) BucketExists(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking bucket existence", "bucket", name) return false, nil } func (mockedS3Provider *MockedS3Client) CreateBucket(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking a bucket", "bucket", name) return nil } func (mockedS3Provider *MockedS3Client) DeleteBucket(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("deleting a bucket", "bucket", name) return nil } func (mockedS3Provider *MockedS3Client) CreatePath(bucketname string, path string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("creating a path on a bucket", "bucket", bucketname, "path", path) return nil } func (mockedS3Provider *MockedS3Client) PathExists(bucketname string, path string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking path existence on a bucket", "bucket", bucketname, "path", path) return true, nil } func (mockedS3Provider *MockedS3Client) DeletePath(bucketname string, path string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("deleting a path on a bucket", "bucket", bucketname, "path", path) return nil } func (mockedS3Provider *MockedS3Client) GetQuota(name string) (int64, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("getting quota on bucket", "bucket", name) return 1, nil } func (mockedS3Provider *MockedS3Client) SetQuota(name string, quota int64) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("setting quota on bucket", "bucket", name, "quotaToSet", quota) return nil } func (mockedS3Provider *MockedS3Client) GetPolicyInfo(name string) (*madmin.PolicyInfo, error) { + s3Logger := 
ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("retrieving policy info", "policy", name) return nil, nil } func (mockedS3Provider *MockedS3Client) CreateOrUpdatePolicy(name string, content string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("create or update policy", "policy", name, "policyContent", content) return nil } func (mockedS3Provider *MockedS3Client) CreateUser(name string, password string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("create or update user", "user", name) return nil } func (mockedS3Provider *MockedS3Client) UserExist(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking user existence", "user", name) return true, nil } func (mockedS3Provider *MockedS3Client) AddServiceAccountForUser(name string, accessKey string, secretKey string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("Adding service account for user", "user", name) return nil } func (mockedS3Provider *MockedS3Client) PolicyExist(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking policy existence", "policy", name) return true, nil } - func (mockedS3Provider *MockedS3Client) AddPoliciesToUser(username string, policies []string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("Adding policies to user", "user", username, "policies", policies) return nil } - func (mockedS3Provider *MockedS3Client) DeletePolicy(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("delete policy", "policy", name) return nil } func (mockedS3Provider *MockedS3Client) DeleteUser(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("delete user", "user", name) return nil } func (mockedS3Provider *MockedS3Client) CheckUserCredentialsValid(name string, accessKey string, secretKey string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("checking credential for user", "user", name) return true, nil } func (mockedS3Provider *MockedS3Client) GetUserPolicies(name string) ([]string, error) { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("Getting user policies for user", "user", name) return []string{}, nil } func (mockedS3Provider *MockedS3Client) RemovePoliciesFromUser(username string, policies []string) error { + s3Logger := ctrl.Log.WithValues("logger", "s3ClientImplMocked") s3Logger.Info("Removing policies from user", "user", username) return nil } -func newMockedS3Client() *MockedS3Client { - return &MockedS3Client{} +func (mockedS3Provider *MockedS3Client) ListBuckets() ([]string, error) { + return []string{}, nil +} + +func (mockedS3Provider *MockedS3Client) GetConfig() *s3client.S3Config { + return &mockedS3Provider.s3Config +} + +func NewMockedS3Client() *MockedS3Client { + return &MockedS3Client{s3Config: s3client.S3Config{}} } diff --git a/controllers/s3/factory/interface.go b/internal/s3/client/s3client.go similarity index 56% rename from controllers/s3/factory/interface.go rename to internal/s3/client/s3client.go index e8bf10f..cc48515 100644 --- a/controllers/s3/factory/interface.go +++ b/internal/s3/client/s3client.go @@ -1,17 +1,39 @@ -package factory +/* +Copyright 2023. 
-import ( - "fmt" +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - "github.com/minio/madmin-go/v3" + http://www.apache.org/licenses/LICENSE-2.0 - ctrl "sigs.k8s.io/controller-runtime" -) +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3client -var ( - s3Logger = ctrl.Log.WithValues("logger", "s3client") +import ( + "github.com/minio/madmin-go/v3" ) +type S3Config struct { + S3Provider string + S3Url string + Region string + AccessKey string + SecretKey string + CaCertificatesBase64 []string + AllowedNamespaces []string + BucketDeletionEnabled bool + S3UserDeletionEnabled bool + PathDeletionEnabled bool + PolicyDeletionEnabled bool +} + type S3Client interface { BucketExists(name string) (bool, error) CreateBucket(name string) error @@ -35,25 +57,6 @@ type S3Client interface { GetUserPolicies(name string) ([]string, error) AddPoliciesToUser(accessKey string, policies []string) error RemovePoliciesFromUser(accessKey string, policies []string) error -} - -type S3Config struct { - S3Provider string - S3UrlEndpoint string - Region string - AccessKey string - SecretKey string - UseSsl bool - CaCertificatesBase64 []string - CaBundlePath string -} - -func GetS3Client(s3Provider string, S3Config *S3Config) (S3Client, error) { - if s3Provider == "mockedS3Provider" { - return newMockedS3Client(), nil - } - if s3Provider == "minio" { - return newMinioS3Client(S3Config), nil - } - return nil, fmt.Errorf("s3 provider " + s3Provider + "not supported") + GetConfig() *S3Config + ListBuckets() ([]string, error) } diff --git a/internal/s3/factory/impl/s3factoryImpl.go b/internal/s3/factory/impl/s3factoryImpl.go new file mode 100644 index 0000000..8cca6fe --- /dev/null +++ b/internal/s3/factory/impl/s3factoryImpl.go @@ -0,0 +1,41 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package s3factory + +import ( + "fmt" + + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" + s3clientImpl "github.com/InseeFrLab/s3-operator/internal/s3/client/impl" +) + +type S3Factory struct { +} + +func NewS3Factory() *S3Factory { + return &S3Factory{} +} + +func (mockedS3Provider *S3Factory) GenerateS3Client(s3Provider string, s3Config *s3client.S3Config) (s3client.S3Client, error) { + if s3Provider == "mockedS3Provider" { + return s3clientImpl.NewMockedS3Client(), nil + } + if s3Provider == "minio" { + return s3clientImpl.NewMinioS3Client(s3Config) + } + return nil, fmt.Errorf("s3 provider %s not supported", s3Provider) +} diff --git a/internal/s3/factory/s3factory.go b/internal/s3/factory/s3factory.go new file mode 100644 index 0000000..2042b4f --- /dev/null +++ b/internal/s3/factory/s3factory.go @@ -0,0 +1,25 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package s3factory + +import ( + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" +) + +type S3Factory interface { + GenerateS3Client(s3Provider string, s3Config *s3client.S3Config) (s3client.S3Client, error) +} diff --git a/renovate.json b/renovate.json index 8fb6c6a..66fb250 100644 --- a/renovate.json +++ b/renovate.json @@ -1,11 +1,11 @@ -{ - "$schema": "https://docs.renovatebot.com/renovate-schema.json", - "extends": [ - "config:base" - ], - "packageRules": [{ - "managers": ["gomod"], - "depTypeList": ["indirect"], - "enabled": false - }] -} +{ + "$schema": "https://docs.renovatebot.com/renovate-schema.json", + "extends": [ + "config:base" + ], + "packageRules": [{ + "managers": ["gomod"], + "depTypeList": ["indirect"], + "enabled": false + }] +} diff --git a/test/mocks/S3FactoryMock.go b/test/mocks/S3FactoryMock.go new file mode 100644 index 0000000..60adc2a --- /dev/null +++ b/test/mocks/S3FactoryMock.go @@ -0,0 +1,37 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mocks + +import ( + "github.com/stretchr/testify/mock" + + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" +) + +// Mocked Factory +type MockedS3ClientFactory struct { + mock.Mock +} + +func NewMockedS3ClientFactory() *MockedS3ClientFactory { + return &MockedS3ClientFactory{} +} + +func (m *MockedS3ClientFactory) GenerateS3Client(s3Provider string, s3Config *s3client.S3Config) (s3client.S3Client, error) { + args := m.Called(s3Provider, s3Config) + return args.Get(0).(s3client.S3Client), args.Error(1) +} diff --git a/test/mocks/mockedS3Client.go b/test/mocks/mockedS3Client.go new file mode 100644 index 0000000..8418004 --- /dev/null +++ b/test/mocks/mockedS3Client.go @@ -0,0 +1,202 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mocks + +import ( + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" + "github.com/minio/madmin-go/v3" + "github.com/stretchr/testify/mock" + ctrl "sigs.k8s.io/controller-runtime" +) + +type MockedS3Client struct { + s3Config s3client.S3Config + mock.Mock +} + +func (mockedS3Provider *MockedS3Client) BucketExists(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("checking bucket existence", "bucket", name) + args := mockedS3Provider.Called(name) + return args.Bool(0), args.Error(1) +} + +func (mockedS3Provider *MockedS3Client) CreateBucket(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("checking a bucket", "bucket", name) + args := mockedS3Provider.Called(name) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) DeleteBucket(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("deleting a bucket", "bucket", name) + args := mockedS3Provider.Called(name) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) CreatePath(bucketname string, path string) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("creating a path on a bucket", "bucket", bucketname, "path", path) + args := mockedS3Provider.Called(bucketname, path) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) PathExists(bucketname string, path string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("checking path existence on a bucket", "bucket", bucketname, "path", path) + args := mockedS3Provider.Called(bucketname, path) + return args.Bool(0), args.Error(1) +} + +func (mockedS3Provider *MockedS3Client) DeletePath(bucketname string, path string) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("deleting a path on a bucket", "bucket", bucketname, "path", path) + args := mockedS3Provider.Called(bucketname, path) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) GetQuota(name string) (int64, error) { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("getting quota on bucket", "bucket", 
name) + args := mockedS3Provider.Called(name) + return int64(args.Int(0)), args.Error(1) +} + +func (mockedS3Provider *MockedS3Client) SetQuota(name string, quota int64) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("setting quota on bucket", "bucket", name, "quotaToSet", quota) + args := mockedS3Provider.Called(name, quota) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) GetPolicyInfo(name string) (*madmin.PolicyInfo, error) { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("retrieving policy info", "policy", name) + args := mockedS3Provider.Called(name) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*madmin.PolicyInfo), args.Error(1) + +} + +func (mockedS3Provider *MockedS3Client) CreateOrUpdatePolicy(name string, content string) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("create or update policy", "policy", name, "policyContent", content) + args := mockedS3Provider.Called(name, content) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) CreateUser(name string, password string) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("create or update user", "user", name) + args := mockedS3Provider.Called(name, password) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) UserExist(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("checking user existence", "user", name) + args := mockedS3Provider.Called(name) + return args.Bool(0), args.Error(1) +} + +func (mockedS3Provider *MockedS3Client) AddServiceAccountForUser( + name string, + accessKey string, + secretKey string, +) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("Adding service account for user", "user", name) + args := mockedS3Provider.Called(name) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) PolicyExist(name string) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("checking policy existence", "policy", name) + args := mockedS3Provider.Called(name) + return args.Bool(0), args.Error(1) +} + +func (mockedS3Provider *MockedS3Client) AddPoliciesToUser( + username string, + policies []string, +) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("Adding policies to user", "user", username, "policies", policies) + args := mockedS3Provider.Called(username, policies) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) DeletePolicy(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("delete policy", "policy", name) + args := mockedS3Provider.Called(name) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) DeleteUser(name string) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("delete user", "user", name) + args := mockedS3Provider.Called(name) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) CheckUserCredentialsValid( + name string, + accessKey string, + secretKey string, +) (bool, error) { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("checking credential for user", "user", name) + args := mockedS3Provider.Called(name, accessKey, secretKey) + return args.Bool(0), args.Error(1) +} + +func (mockedS3Provider *MockedS3Client) GetUserPolicies(name 
string) ([]string, error) { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("Getting user policies for user", "user", name) + args := mockedS3Provider.Called(name) + return args.Get(0).([]string), args.Error(1) +} + +func (mockedS3Provider *MockedS3Client) RemovePoliciesFromUser( + username string, + policies []string, +) error { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("Removing policies from user", "user", username) + args := mockedS3Provider.Called(username, policies) + return args.Error(0) +} + +func (mockedS3Provider *MockedS3Client) ListBuckets() ([]string, error) { + s3Logger := ctrl.Log.WithValues("logger", "mockedS3Client") + s3Logger.Info("Listing bucket") + args := mockedS3Provider.Called() + return args.Get(0).([]string), args.Error(1) +} + +func (mockedS3Provider *MockedS3Client) GetConfig() *s3client.S3Config { + return &mockedS3Provider.s3Config +} + +func NewMockedS3Client() *MockedS3Client { + return &MockedS3Client{s3Config: s3client.S3Config{}} +} diff --git a/test/utils/testUtils.go b/test/utils/testUtils.go new file mode 100644 index 0000000..b899496 --- /dev/null +++ b/test/utils/testUtils.go @@ -0,0 +1,188 @@ +/* +Copyright 2023. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testUtils + +import ( + "fmt" + + s3v1alpha1 "github.com/InseeFrLab/s3-operator/api/v1alpha1" + s3client "github.com/InseeFrLab/s3-operator/internal/s3/client" + s3factory "github.com/InseeFrLab/s3-operator/internal/s3/factory" + "github.com/InseeFrLab/s3-operator/test/mocks" + "github.com/minio/madmin-go/v3" + "github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +type TestUtils struct { + S3Factory s3factory.S3Factory + Client client.Client +} + +func NewTestUtils() *TestUtils { + return &TestUtils{} +} + +func (t *TestUtils) SetupMockedS3FactoryAndClient() { + mockedS3Client := mocks.NewMockedS3Client() + mockedS3Client.On("BucketExists", "test-bucket").Return(false, nil) + mockedS3Client.On("BucketExists", "existing-bucket").Return(true, nil) + mockedS3Client.On("CreateBucket", "test-bucket").Return(nil) + mockedS3Client.On("SetQuota", "test-bucket", int64(10)).Return(nil) + mockedS3Client.On("ListBuckets").Return([]string{}, nil) + mockedS3Client.On("GetPolicyInfo", "example-policy").Return(nil, nil) + existingPolicy := []byte(`{ +"Version": "2012-10-17", +"Statement": [ +{ + "Effect": "Allow", + "Principal": { + "AWS": "*" + }, + "Action": [ + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Resource": "arn:aws:s3:::my-bucket/*" +} +] +}`) + mockedS3Client.On("GetPolicyInfo", "existing-policy").Return(&madmin.PolicyInfo{PolicyName: "existing-policy", Policy: existingPolicy}, nil) + mockedS3Client.On("CreateOrUpdatePolicy", "existing-policy", mock.AnythingOfType("string")).Return(nil) + 
mockedS3Client.On("CreateOrUpdatePolicy", "example-policy", "").Return(nil) + mockedS3Client.On("PathExists", "existing-bucket", "mypath").Return(false, nil) + mockedS3Client.On("CreatePath", "existing-bucket", "mypath").Return(nil) + mockedS3Client.On("UserExist", "example-user").Return(false, nil) + mockedS3Client.On("CreateUser", "example-user", mock.AnythingOfType("string")).Return(nil) + mockedS3Client.On("AddPoliciesToUser", "example-user", mock.AnythingOfType("[]string")).Return(nil) + + mockedS3Client.On("UserExist", "existing-valid-user").Return(true, nil) + mockedS3Client.On("CreateUser", "existing-valid-user", mock.AnythingOfType("string")).Return(nil) + mockedS3Client.On("AddPoliciesToUser", "existing-valid-user", mock.AnythingOfType("[]string")).Return(nil) + + mockedS3Client.On("CheckUserCredentialsValid", "existing-valid-user", "existing-valid-user", "invalidSecret").Return(false, nil) + mockedS3Client.On("CheckUserCredentialsValid", "existing-valid-user", "existing-valid-user", "invalidSecret").Return(false, nil) + mockedS3Client.On("CheckUserCredentialsValid", "existing-valid-user", "existing-valid-user", "validSecret").Return(true, nil) + mockedS3Client.On("CheckUserCredentialsValid", "existing-valid-user", "existing-valid-user", mock.AnythingOfType("string")).Return(true, nil) + mockedS3Client.On("GetQuota", "existing-bucket").Return(10, nil) + mockedS3Client.On("GetQuota", "existing-invalid-bucket").Return(10, nil) + mockedS3Client.On("SetQuota", "existing-invalid-bucket", int64(100)).Return(nil) + mockedS3Client.On("GetUserPolicies", "existing-valid-user").Return([]string{"admin"}, nil) + mockedS3Client.On("PathExists", "existing-bucket", "example").Return(true, nil) + mockedS3Client.On("PathExists", "existing-invalid-bucket", "example").Return(true, nil) + mockedS3Client.On("PathExists", "existing-invalid-bucket", "non-existing").Return(false, nil) + mockedS3Client.On("BucketExists", "existing-invalid-bucket").Return(true, nil) + mockedS3Client.On("BucketExists", "non-existing-bucket").Return(false, nil) + + mockedS3Client.On("CreatePath", "existing-invalid-bucket", "non-existing").Return(nil) + + mockedS3Client.On("DeleteUser", "existing-valid-user").Return(nil) + + mockedInvalidS3Client := mocks.NewMockedS3Client() + mockedInvalidS3Client.On("BucketExists", "test-bucket").Return(false, nil) + mockedInvalidS3Client.On("CreateBucket", "test-bucket").Return(nil) + mockedInvalidS3Client.On("SetQuota", "test-bucket", int64(10)).Return(nil) + + mockedInvalidS3Client.On("ListBuckets").Return([]string{}, fmt.Errorf("random error")) + + mockedS3factory := mocks.NewMockedS3ClientFactory() + mockedS3factory.On("GenerateS3Client", "minio", mock.MatchedBy(func(cfg *s3client.S3Config) bool { + return cfg.S3Url == "https://minio.example.com" + })).Return(mockedS3Client, nil) + mockedS3factory.On("GenerateS3Client", "minio", mock.MatchedBy(func(cfg *s3client.S3Config) bool { + return cfg.S3Url == "https://minio.invalid.example.com" + })).Return(mockedInvalidS3Client, nil) + + t.S3Factory = mockedS3factory +} + +func (t *TestUtils) SetupDefaultS3instance() *s3v1alpha1.S3Instance { + s3Instance := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + 
ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + }, + Status: s3v1alpha1.S3InstanceStatus{Conditions: []metav1.Condition{{Reason: s3v1alpha1.Reconciled}}}, + } + + return s3Instance +} + +func (t *TestUtils) GenerateBasicS3InstanceAndSecret() (*s3v1alpha1.S3Instance, *corev1.Secret) { + s3instanceResource := &s3v1alpha1.S3Instance{ + Spec: s3v1alpha1.S3InstanceSpec{ + AllowedNamespaces: []string{"default", "test-*", "*-namespace", "*allowed*"}, + Url: "https://minio.example.com", + S3Provider: "minio", + Region: "us-east-1", + BucketDeletionEnabled: true, + S3UserDeletionEnabled: true, + PathDeletionEnabled: true, + PolicyDeletionEnabled: true, + SecretRef: "minio-credentials", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "default", + Namespace: "s3-operator", + }, + Status: s3v1alpha1.S3InstanceStatus{Conditions: []metav1.Condition{{Reason: s3v1alpha1.Reconciled}}}, + } + + secretResource := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: "minio-credentials", + Namespace: "s3-operator", + }, + StringData: map[string]string{ + "accessKey": "access_key_value", + "secretKey": "secret_key_value", + }, + } + + return s3instanceResource, secretResource +} + +func (t *TestUtils) SetupClient(objects []client.Object) { + // Register the custom resource with the scheme sch := runtime.NewScheme() + s := scheme.Scheme + s3v1alpha1.AddToScheme(s) + corev1.AddToScheme(s) + + client := fake.NewClientBuilder(). + WithScheme(s). + WithObjects(objects...). + WithStatusSubresource(objects...). + Build() + + t.Client = client +}
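A minimal sketch of how the factory seam introduced under internal/s3/factory is meant to be consumed, assuming a caller inside this module (the internal/ packages are not importable from outside it); the endpoint, region and credentials are placeholder values and the import aliases are choices of this example, not part of the diff:

package main

import (
	"fmt"

	s3client "github.com/InseeFrLab/s3-operator/internal/s3/client"
	s3factoryimpl "github.com/InseeFrLab/s3-operator/internal/s3/factory/impl"
)

func main() {
	// Provider-agnostic configuration defined in internal/s3/client/s3client.go.
	cfg := &s3client.S3Config{
		S3Provider: "minio",                     // "mockedS3Provider" returns the no-op mock instead
		S3Url:      "https://minio.example.com", // placeholder endpoint
		Region:     "us-east-1",
		AccessKey:  "access_key_value", // placeholder credentials
		SecretKey:  "secret_key_value",
	}

	// The concrete factory lives in internal/s3/factory/impl; any provider other
	// than "minio" or "mockedS3Provider" yields an error.
	factory := s3factoryimpl.NewS3Factory()
	s3Client, err := factory.GenerateS3Client(cfg.S3Provider, cfg)
	if err != nil {
		panic(err)
	}

	// From here on, callers only see the s3client.S3Client interface. With the
	// placeholder endpoint above this call will naturally fail; point cfg at a
	// real MinIO (or use "mockedS3Provider") to get an answer.
	exists, err := s3Client.BucketExists("my-bucket")
	if err != nil {
		fmt.Println("bucket check failed:", err)
		return
	}
	fmt.Println("bucket exists:", exists)
}

Because GenerateS3Client hides the concrete client behind the s3client.S3Client interface, the testify-based mocks in test/mocks (MockedS3ClientFactory and MockedS3Client) and the helpers in test/utils can be wired in through the same seam without changing caller code.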