From 5e5a943ec45b802e0cfbd33fbd27f361d3b38850 Mon Sep 17 00:00:00 2001 From: Bharath B Date: Fri, 19 Sep 2025 19:20:15 +0530 Subject: [PATCH 1/7] ESO-101: Adds vendor packages required for API integration tests Signed-off-by: Bharath B --- go.mod | 27 +- go.sum | 54 +- vendor/github.com/ghodss/yaml/.gitignore | 20 + vendor/github.com/ghodss/yaml/.travis.yml | 7 + vendor/github.com/ghodss/yaml/LICENSE | 50 ++ vendor/github.com/ghodss/yaml/README.md | 121 ++++ vendor/github.com/ghodss/yaml/fields.go | 501 ++++++++++++++ vendor/github.com/ghodss/yaml/yaml.go | 277 ++++++++ vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md | 60 ++ vendor/github.com/onsi/ginkgo/v2/core_dsl.go | 32 +- .../onsi/ginkgo/v2/decorator_dsl.go | 43 ++ .../ginkgo/v2/ginkgo/build/build_command.go | 1 - .../ginkgo/v2/ginkgo/watch/dependencies.go | 15 +- .../github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go | 3 + .../onsi/ginkgo/v2/internal/around_node.go | 34 + .../onsi/ginkgo/v2/internal/focus.go | 9 +- .../onsi/ginkgo/v2/internal/group.go | 28 +- .../onsi/ginkgo/v2/internal/node.go | 83 ++- .../ginkgo/v2/internal/progress_report.go | 2 +- .../onsi/ginkgo/v2/internal/spec_context.go | 27 + .../onsi/ginkgo/v2/internal/suite.go | 39 +- .../internal/testingtproxy/testing_t_proxy.go | 6 + .../ginkgo/v2/reporters/default_reporter.go | 39 +- .../onsi/ginkgo/v2/reporters/junit_report.go | 9 + .../ginkgo/v2/reporters/teamcity_report.go | 8 + .../onsi/ginkgo/v2/types/around_node.go | 56 ++ .../github.com/onsi/ginkgo/v2/types/config.go | 16 +- .../github.com/onsi/ginkgo/v2/types/errors.go | 18 + .../onsi/ginkgo/v2/types/semver_filter.go | 60 ++ .../github.com/onsi/ginkgo/v2/types/types.go | 139 ++-- .../onsi/ginkgo/v2/types/version.go | 2 +- vendor/github.com/onsi/gomega/CHANGELOG.md | 10 + vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +- .../onsi/gomega/internal/async_assertion.go | 2 +- .../matchers/be_comparable_to_matcher.go | 3 +- .../gomega/matchers/match_yaml_matcher.go | 2 +- .../vmware-archive/yaml-patch/LICENSE | 201 ++++++ .../vmware-archive/yaml-patch/Makefile | 17 + .../vmware-archive/yaml-patch/README.md | 86 +++ .../vmware-archive/yaml-patch/container.go | 167 +++++ .../vmware-archive/yaml-patch/node.go | 83 +++ .../vmware-archive/yaml-patch/operation.go | 181 +++++ .../vmware-archive/yaml-patch/patch.go | 60 ++ .../vmware-archive/yaml-patch/pathfinder.go | 109 +++ .../yaml-patch/placeholder_wrapper.go | 51 ++ vendor/golang.org/x/net/http2/http2.go | 2 - vendor/golang.org/x/sys/unix/mkerrors.sh | 3 + .../golang.org/x/sys/unix/syscall_darwin.go | 56 +- vendor/golang.org/x/sys/unix/zerrors_linux.go | 44 +- .../x/sys/unix/zerrors_linux_386.go | 2 + .../x/sys/unix/zerrors_linux_amd64.go | 2 + .../x/sys/unix/zerrors_linux_arm.go | 2 + .../x/sys/unix/zerrors_linux_arm64.go | 2 + .../x/sys/unix/zerrors_linux_loong64.go | 2 + .../x/sys/unix/zerrors_linux_mips.go | 2 + .../x/sys/unix/zerrors_linux_mips64.go | 2 + .../x/sys/unix/zerrors_linux_mips64le.go | 2 + .../x/sys/unix/zerrors_linux_mipsle.go | 2 + .../x/sys/unix/zerrors_linux_ppc.go | 2 + .../x/sys/unix/zerrors_linux_ppc64.go | 2 + .../x/sys/unix/zerrors_linux_ppc64le.go | 2 + .../x/sys/unix/zerrors_linux_riscv64.go | 2 + .../x/sys/unix/zerrors_linux_s390x.go | 2 + .../x/sys/unix/zerrors_linux_sparc64.go | 2 + .../x/sys/unix/zsysnum_linux_386.go | 1 + .../x/sys/unix/zsysnum_linux_amd64.go | 1 + .../x/sys/unix/zsysnum_linux_arm.go | 1 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_loong64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + 
.../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 + .../x/sys/unix/zsysnum_linux_riscv64.go | 1 + .../x/sys/unix/zsysnum_linux_s390x.go | 1 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + vendor/golang.org/x/sys/unix/ztypes_linux.go | 37 +- .../golang.org/x/sys/unix/ztypes_linux_386.go | 30 +- .../x/sys/unix/ztypes_linux_amd64.go | 28 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 32 +- .../x/sys/unix/ztypes_linux_arm64.go | 28 +- .../x/sys/unix/ztypes_linux_loong64.go | 28 +- .../x/sys/unix/ztypes_linux_mips.go | 30 +- .../x/sys/unix/ztypes_linux_mips64.go | 28 +- .../x/sys/unix/ztypes_linux_mips64le.go | 28 +- .../x/sys/unix/ztypes_linux_mipsle.go | 30 +- .../golang.org/x/sys/unix/ztypes_linux_ppc.go | 32 +- .../x/sys/unix/ztypes_linux_ppc64.go | 28 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 28 +- .../x/sys/unix/ztypes_linux_riscv64.go | 28 +- .../x/sys/unix/ztypes_linux_s390x.go | 28 +- .../x/sys/unix/ztypes_linux_sparc64.go | 28 +- vendor/golang.org/x/term/term_windows.go | 4 +- vendor/golang.org/x/term/terminal.go | 9 +- .../x/tools/go/ast/astutil/enclosing.go | 2 +- .../x/tools/go/ast/inspector/inspector.go | 1 + .../x/tools/go/ast/inspector/typeof.go | 2 - vendor/golang.org/x/tools/go/packages/doc.go | 2 + vendor/golang.org/x/tools/go/ssa/builder.go | 2 +- .../tools/internal/imports/source_modindex.go | 47 +- .../x/tools/internal/modindex/directories.go | 148 ++-- .../x/tools/internal/modindex/index.go | 233 ++++--- .../x/tools/internal/modindex/modindex.go | 205 +++--- .../x/tools/internal/modindex/symbols.go | 69 +- .../x/tools/internal/modindex/types.go | 25 - .../protobuf/encoding/protowire/wire.go | 26 +- .../editiondefaults/editions_defaults.binpb | Bin 146 -> 154 bytes .../protobuf/internal/filedesc/editions.go | 3 + .../protobuf/internal/filedesc/presence.go | 33 + .../protobuf/internal/genid/descriptor_gen.go | 90 ++- .../internal/impl/codec_message_opaque.go | 3 +- .../protobuf/internal/impl/message_opaque.go | 45 +- .../protobuf/internal/impl/presence.go | 3 - .../protobuf/internal/version/version.go | 2 +- .../reflect/protoreflect/source_gen.go | 8 + .../types/descriptorpb/descriptor.pb.go | 633 ++++++++++++------ vendor/modules.txt | 33 +- .../pkg/envtest/komega/OWNERS | 13 + .../pkg/envtest/komega/default.go | 102 +++ .../pkg/envtest/komega/equalobject.go | 297 ++++++++ .../pkg/envtest/komega/interfaces.go | 76 +++ .../pkg/envtest/komega/komega.go | 117 ++++ .../pkg/webhook/admission/multi.go | 6 + 126 files changed, 4527 insertions(+), 1093 deletions(-) create mode 100644 vendor/github.com/ghodss/yaml/.gitignore create mode 100644 vendor/github.com/ghodss/yaml/.travis.yml create mode 100644 vendor/github.com/ghodss/yaml/LICENSE create mode 100644 vendor/github.com/ghodss/yaml/README.md create mode 100644 vendor/github.com/ghodss/yaml/fields.go create mode 100644 vendor/github.com/ghodss/yaml/yaml.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/internal/around_node.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/around_node.go create mode 100644 vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go create mode 100644 vendor/github.com/vmware-archive/yaml-patch/LICENSE create mode 100644 vendor/github.com/vmware-archive/yaml-patch/Makefile create mode 100644 vendor/github.com/vmware-archive/yaml-patch/README.md create mode 100644 
vendor/github.com/vmware-archive/yaml-patch/container.go create mode 100644 vendor/github.com/vmware-archive/yaml-patch/node.go create mode 100644 vendor/github.com/vmware-archive/yaml-patch/operation.go create mode 100644 vendor/github.com/vmware-archive/yaml-patch/patch.go create mode 100644 vendor/github.com/vmware-archive/yaml-patch/pathfinder.go create mode 100644 vendor/github.com/vmware-archive/yaml-patch/placeholder_wrapper.go delete mode 100644 vendor/golang.org/x/tools/internal/modindex/types.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/presence.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/OWNERS create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/default.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/equalobject.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/interfaces.go create mode 100644 vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/komega.go diff --git a/go.mod b/go.mod index 912af51a..e75bcefe 100644 --- a/go.mod +++ b/go.mod @@ -6,14 +6,16 @@ require ( github.com/aws/aws-sdk-go v1.55.7 github.com/cert-manager/cert-manager v1.18.2 github.com/elastic/crd-ref-docs v0.1.0 + github.com/ghodss/yaml v1.0.0 github.com/go-bindata/go-bindata v3.1.2+incompatible github.com/go-logr/logr v1.4.3 github.com/golangci/golangci-lint v1.64.8 github.com/maxbrunsfeld/counterfeiter/v6 v6.11.2 - github.com/onsi/ginkgo/v2 v2.23.4 - github.com/onsi/gomega v1.38.0 + github.com/onsi/ginkgo/v2 v2.25.3 + github.com/onsi/gomega v1.38.2 github.com/openshift/build-machinery-go v0.0.0-20250806130835-622c0378eb0d github.com/stretchr/testify v1.10.0 + github.com/vmware-archive/yaml-patch v0.0.11 go.uber.org/zap v1.27.0 golang.org/x/vuln v1.1.4 k8s.io/api v0.32.3 @@ -23,7 +25,7 @@ require ( k8s.io/klog/v2 v2.130.1 k8s.io/kubernetes v1.32.6 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 - sigs.k8s.io/controller-runtime v0.20.4 + sigs.k8s.io/controller-runtime v0.20.5-0.20250517180713-32e5e9e948a5 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250308055145-5fe7bb3edc86 sigs.k8s.io/controller-tools v0.17.3 sigs.k8s.io/kustomize/kustomize/v5 v5.5.0 @@ -156,6 +158,7 @@ require ( github.com/karamaru-alpha/copyloopvar v1.2.1 // indirect github.com/kisielk/errcheck v1.9.0 // indirect github.com/kkHAIKE/contextcheck v1.1.6 // indirect + github.com/krishicks/yaml-patch v0.0.10 // indirect github.com/kulti/thelper v0.6.3 // indirect github.com/kunwardeep/paralleltest v1.0.10 // indirect github.com/kylelemons/godebug v1.1.0 // indirect @@ -269,26 +272,26 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.40.0 // indirect + golang.org/x/crypto v0.41.0 // indirect golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc // indirect golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac // indirect - golang.org/x/mod v0.26.0 // indirect - golang.org/x/net v0.42.0 // indirect + golang.org/x/mod v0.27.0 // indirect + golang.org/x/net v0.43.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.34.0 // indirect - golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b // indirect - golang.org/x/term v0.33.0 // indirect - golang.org/x/text v0.27.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488 // indirect + golang.org/x/term 
v0.34.0 // indirect + golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.12.0 // indirect - golang.org/x/tools v0.35.0 // indirect + golang.org/x/tools v0.36.0 // indirect golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect google.golang.org/genproto v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect google.golang.org/grpc v1.73.0 // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/protobuf v1.36.7 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/go.sum b/go.sum index 7784b63b..8e09e6a1 100644 --- a/go.sum +++ b/go.sum @@ -142,6 +142,8 @@ github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo= github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY= github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghostiam/protogetter v0.3.9 h1:j+zlLLWzqLay22Cz/aYwTHKQ88GE2DQ6GkWSYFOI4lQ= github.com/ghostiam/protogetter v0.3.9/go.mod h1:WZ0nw9pfzsgxuRsPOFQomgDVSWtDLJRfQJEhsGbmQMA= github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE= @@ -333,6 +335,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/krishicks/yaml-patch v0.0.10 h1:H4FcHpnNwVmw8u0MjPRjWyIXtco6zM2F78t+57oNM3E= +github.com/krishicks/yaml-patch v0.0.10/go.mod h1:Sm5TchwZS6sm7RJoyg87tzxm2ZcKzdRE4Q7TjNhPrME= github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs= github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I= github.com/kunwardeep/paralleltest v1.0.10 h1:wrodoaKYzS2mdNVnc4/w31YaXFtsc21PCTdvWJ/lDDs= @@ -413,10 +417,10 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus= -github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8= -github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY= -github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o= +github.com/onsi/ginkgo/v2 v2.25.3 h1:Ty8+Yi/ayDAGtk4XxmmfUy4GabvM+MegeB4cDLRi6nw= +github.com/onsi/ginkgo/v2 v2.25.3/go.mod h1:43uiyQC4Ed2tkOzLsEYm7hnrb7UJTWHYNsuy3bG/snE= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= 
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/openshift/build-machinery-go v0.0.0-20250806130835-622c0378eb0d h1:iwdrJUzp+GsbCNL84aZtSYwKSjrtxUJJ0cnVH8OsIeU= @@ -562,6 +566,8 @@ github.com/uudashr/gocognit v1.2.0 h1:3BU9aMr1xbhPlvJLSydKwdLN3tEUUrzPSSM8S4hDYR github.com/uudashr/gocognit v1.2.0/go.mod h1:k/DdKPI6XBZO1q7HgoV2juESI2/Ofj9AcHPZhBBdrTU= github.com/uudashr/iface v1.3.1 h1:bA51vmVx1UIhiIsQFSNq6GZ6VPTk3WNMZgRiCe9R29U= github.com/uudashr/iface v1.3.1/go.mod h1:4QvspiRd3JLPAEXBQ9AiZpLbJlrWWgRChOKDJEuQTdg= +github.com/vmware-archive/yaml-patch v0.0.11 h1:9o4FGgkpLD88A5O7BOOXs7UBeeymRT9atLsKmHJ2wWs= +github.com/vmware-archive/yaml-patch v0.0.11/go.mod h1:mHWEn1O1CU3yBnN6iPFeAwAqzUibF2X+9EltQ28w+Vs= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xen0n/gosmopolitan v1.2.2 h1:/p2KTnMzwRexIW8GlKawsTWOxn7UHA+jCMF/V8HHtvU= @@ -647,8 +653,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= -golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= -golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc h1:TS73t7x3KarrNd5qAipmspBDS1rkMcgVG/fS1aRb4Rc= golang.org/x/exp v0.0.0-20250711185948-6ae5c78190dc/go.mod h1:A+z0yzpGtvnG90cToK5n2tu8UJVP2XUATh+r+sfOOOc= golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= @@ -666,8 +672,8 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.13.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= -golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= +golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= +golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -683,8 +689,8 @@ golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= golang.org/x/net v0.16.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= -golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= -golang.org/x/net v0.42.0/go.mod 
h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -720,10 +726,10 @@ golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= -golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b h1:DU+gwOBXU+6bO0sEyO7o/NeMlxZxCZEvI7v+J4a1zRQ= -golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b/go.mod h1:4ZwOYna0/zsOKwuR5X/m0QFOJpSZvAxFfkQT+Erd9D4= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488 h1:3doPGa+Gg4snce233aCWnbZVFsyFMo/dR40KK/6skyE= +golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488/go.mod h1:fGb/2+tgXXjhjHsTNdVEEMZNWA0quBnfrO+AfoDSAKw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= @@ -732,8 +738,8 @@ golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= -golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= -golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -744,8 +750,8 @@ golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= -golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= -golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE= golang.org/x/time v0.12.0/go.mod 
h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -768,8 +774,8 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= -golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= +golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= +golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= @@ -792,8 +798,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1: google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok= google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= +google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -853,8 +859,8 @@ mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f h1:lMpcwN6GxNbWtbpI1+xzFLSW8 mvdan.cc/unparam v0.0.0-20240528143540-8a5130ca722f/go.mod h1:RSLa7mKKCNeTTMHBw5Hsy2rfJmd6O2ivt9Dw9ZqCQpQ= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= +sigs.k8s.io/controller-runtime v0.20.5-0.20250517180713-32e5e9e948a5 h1:gWRqA5IcpiKl9jSzd+gzZVJWEDmJAXjZNYbo3owIzYc= +sigs.k8s.io/controller-runtime v0.20.5-0.20250517180713-32e5e9e948a5/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250308055145-5fe7bb3edc86 h1:96TA+X7D58V3065duUfj+p+Pp17q8U02+cSCmE3IsaU= sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250308055145-5fe7bb3edc86/go.mod h1:IaDsO8xSPRxRG1/rm9CP7+jPmj0nMNAuNi/yiHnLX8k= sigs.k8s.io/controller-tools v0.17.3 h1:lwFPLicpBKLgIepah+c8ikRBubFW5kOQyT88r3EwfNw= diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore new file mode 100644 index 
00000000..e256a31e --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml new file mode 100644 index 00000000..0e9d6edc --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 +script: + - go test + - go build diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 00000000..7805d36d --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md new file mode 100644 index 00000000..0200f75b --- /dev/null +++ b/vendor/github.com/ghodss/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. + +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. 
+ var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go new file mode 100644 index 00000000..58600740 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. 
+type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. 
+ // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. 
+// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go new file mode 100644 index 00000000..4fb4054a --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -0,0 +1,277 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { + vo := reflect.ValueOf(o) + j, err := yamlToJSON(y, &vo) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = json.Unmarshal(j, o) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// Convert JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. +// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yaml.Unmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). So, convert the YAML-compatible object + // to a JSON-compatible object, failing with an error if irrecoverable + // incompatibilties happen along the way. + jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) + if err != nil { + return nil, err + } + + // Convert this object to JSON and return the data. 
+ return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. + if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. + if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. + // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. + keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. + s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. 
+ jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 69b15d18..0e5f2378 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,63 @@ +## 2.25.3 + +### Fixes + +- emit --github-output group only for progress report itself [f01aed1] + +## 2.25.2 + +### Fixes +Add github output group for progress report content + +### Maintenance +Bump Gomega + +## 2.25.1 + +### Fixes +- fix(types): ignore nameless nodes on FullText() [10866d3] +- chore: fix some CodeQL warnings [2e42cff] + +## 2.25.0 + +### `AroundNode` + +This release introduces a new decorator to support more complex spec setup usecases. + +`AroundNode` registers a function that runs before each individual node. This is considered a more advanced decorator. + +Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information and some examples. + +Allowed signatures: + +- `AroundNode(func())` - `func` will be called before the node is run. +- `AroundNode(func(ctx context.Context) context.Context)` - `func` can wrap the passed in context and return a new one which will be passed on to the node. +- `AroundNode(func(ctx context.Context, body func(ctx context.Context)))` - `ctx` is the context for the node and `body` is a function that must be called to run the node. This gives you complete control over what runs before and after the node. + +Multiple `AroundNode` decorators can be applied to a single node and they will run in the order they are applied. 
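For reviewers new to `AroundNode`, a minimal sketch of how the three signatures listed above might fit together in one of the integration-test suites this PR targets (illustrative only — the suite name, spec text, context key, and timeout are invented, and this snippet is not part of the vendored files):

```go
package integration_test

import (
	"context"
	"runtime"
	"testing"
	"time"

	. "github.com/onsi/ginkgo/v2"
)

type ctxKey string // hypothetical key, used only for this illustration

func TestIntegration(t *testing.T) {
	// Applied to RunSpecs, the decorator runs before every node in the suite.
	RunSpecs(t, "API Integration Suite", AroundNode(func() {
		runtime.LockOSThread() // keep each node on a single OS thread
	}))
}

var _ = Describe("the operator API", func() {
	It("creates the resource",
		// Wrap the node's context and hand the new one on to the node...
		AroundNode(func(ctx context.Context) context.Context {
			return context.WithValue(ctx, ctxKey("run"), "integration")
		}),
		// ...or take full control over what runs before and after the node.
		AroundNode(func(ctx context.Context, body func(ctx context.Context)) {
			ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
			defer cancel()
			body(ctx)
		}),
		func(ctx SpecContext) {
			// ... exercise the API under test with ctx ...
		})
})
```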
+ +Unlike setup nodes like `BeforeEach` and `DeferCleanup`, `AroundNode` is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call `runtime.LockOSThread()` in the `AroundNode` to ensure that the node runs on a single thread). + +Since `AroundNode` allows you to modify the context you can also use `AroundNode` to implement shared setup that attaches values to the context. + +If applied to a container, `AroundNode` will run before every node in the container. Including setup nodes like `BeforeEach` and `DeferCleanup`. + +`AroundNode` can also be applied to `RunSpecs` to run before every node in the suite. This opens up new mechanisms for instrumenting individual nodes across an entire suite. + +## 2.24.0 + +### Features + +Specs can now be decorated with (e.g.) `SemVerConstraint("2.1.0")` and `ginkgo --sem-ver-filter="2.1.1"` will only run constrained specs that match the requested version. Learn more in the docs [here](https://onsi.github.io/ginkgo/#spec-semantic-version-filtering)! Thanks to @Icarus9913 for the PR. + +### Fixes + +- remove -o from run command [3f5d379]. fixes [#1582](https://github.com/onsi/ginkgo/issues/1582) + +### Maintenance + +Numerous dependency bumps and documentation fixes + ## 2.23.4 Prior to this release Ginkgo would compute the incorrect number of available CPUs when running with `-p` in a linux container. Thanks to @emirot for the fix! diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index d027bdff..ec41e883 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -186,6 +186,20 @@ func GinkgoLabelFilter() string { return suiteConfig.LabelFilter } +/* +GinkgoSemVerFilter() returns the semantic version filter configured for this suite via `--sem-ver-filter`. + +You can use this to manually check if a set of semantic version constraints would satisfy the filter via: + + if (SemVerConstraint("> 2.6.0", "< 2.8.0").MatchesSemVerFilter(GinkgoSemVerFilter())) { + //... + } +*/ +func GinkgoSemVerFilter() string { + suiteConfig, _ := GinkgoConfiguration() + return suiteConfig.SemVerFilter +} + /* PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant when running in parallel and output to stdout/stderr is being intercepted. 
You generally @@ -254,7 +268,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { } defer global.PopClone() - suiteLabels := extractSuiteConfiguration(args) + suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args) var reporter reporters.Reporter if suiteConfig.ParallelTotal == 1 { @@ -297,7 +311,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { suitePath, err = filepath.Abs(suitePath) exitIfErr(err) - passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) + passed, hasFocusedTests := global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) outputInterceptor.Shutdown() flagSet.ValidateDeprecations(deprecationTracker) @@ -316,8 +330,10 @@ func RunSpecs(t GinkgoTestingT, description string, args ...any) bool { return passed } -func extractSuiteConfiguration(args []any) Labels { +func extractSuiteConfiguration(args []any) (Labels, SemVerConstraints, types.AroundNodes) { suiteLabels := Labels{} + suiteSemVerConstraints := SemVerConstraints{} + aroundNodes := types.AroundNodes{} configErrors := []error{} for _, arg := range args { switch arg := arg.(type) { @@ -327,6 +343,10 @@ func extractSuiteConfiguration(args []any) Labels { reporterConfig = arg case Labels: suiteLabels = append(suiteLabels, arg...) + case SemVerConstraints: + suiteSemVerConstraints = append(suiteSemVerConstraints, arg...) + case types.AroundNodeDecorator: + aroundNodes = append(aroundNodes, arg) default: configErrors = append(configErrors, types.GinkgoErrors.UnknownTypePassedToRunSpecs(arg)) } @@ -342,7 +362,7 @@ func extractSuiteConfiguration(args []any) Labels { os.Exit(1) } - return suiteLabels + return suiteLabels, suiteSemVerConstraints, aroundNodes } func getwd() (string, error) { @@ -365,7 +385,7 @@ func PreviewSpecs(description string, args ...any) Report { } defer global.PopClone() - suiteLabels := extractSuiteConfiguration(args) + suiteLabels, suiteSemVerConstraints, suiteAroundNodes := extractSuiteConfiguration(args) priorDryRun, priorParallelTotal, priorParallelProcess := suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess suiteConfig.DryRun, suiteConfig.ParallelTotal, suiteConfig.ParallelProcess = true, 1, 1 defer func() { @@ -383,7 +403,7 @@ func PreviewSpecs(description string, args ...any) Report { suitePath, err = filepath.Abs(suitePath) exitIfErr(err) - global.Suite.Run(description, suiteLabels, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) + global.Suite.Run(description, suiteLabels, suiteSemVerConstraints, suiteAroundNodes, suitePath, global.Failer, reporter, writer, outputInterceptor, interrupt_handler.NewInterruptHandler(client), client, internal.RegisterForProgressSignal, suiteConfig) return global.Suite.GetPreviewReport() } diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go index c65af4ce..8bee5ace 100644 --- a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -2,6 +2,7 @@ package 
ginkgo import ( "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/types" ) /* @@ -99,6 +100,23 @@ You can learn more here: https://onsi.github.io/ginkgo/#spec-labels */ type Labels = internal.Labels +/* +SemVerConstraint decorates specs with SemVerConstraints. Multiple semantic version constraints can be passed to SemVerConstraint and these strings must follow the semantic version constraint rules. +SemVerConstraints can be applied to container and subject nodes, but not setup nodes. You can provide multiple SemVerConstraints to a given node and a spec's semantic version constraints is the union of all semantic version constraints in its node hierarchy. + +You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering +You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference +*/ +func SemVerConstraint(semVerConstraints ...string) SemVerConstraints { + return SemVerConstraints(semVerConstraints) +} + +/* +SemVerConstraints are the type for spec SemVerConstraint decorators. Use SemVerConstraint(...) to construct SemVerConstraints. +You can learn more here: https://onsi.github.io/ginkgo/#spec-semantic-version-filtering +*/ +type SemVerConstraints = internal.SemVerConstraints + /* PollProgressAfter allows you to override the configured value for --poll-progress-after for a particular node. @@ -141,3 +159,28 @@ SuppressProgressReporting is a decorator that allows you to disable progress rep if you have a `ReportAfterEach` node that is running for every skipped spec and is generating lots of progress reports. */ const SuppressProgressReporting = internal.SuppressProgressReporting + +/* +AroundNode registers a function that runs before each individual node. This is considered a more advanced decorator. + +Please read the [docs](https://onsi.github.io/ginkgo/#advanced-around-node) for more information. + +Allowed signatures: + +- AroundNode(func()) - func will be called before the node is run. +- AroundNode(func(ctx context.Context) context.Context) - func can wrap the passed in context and return a new one which will be passed on to the node. +- AroundNode(func(ctx context.Context, body func(ctx context.Context))) - ctx is the context for the node and body is a function that must be called to run the node. This gives you complete control over what runs before and after the node. + +Multiple AroundNode decorators can be applied to a single node and they will run in the order they are applied. + +Unlike setup nodes like BeforeEach and DeferCleanup, AroundNode is guaranteed to run in the same goroutine as the decorated node. This is necessary when working with lower-level libraries that must run on a single thread (you can call runtime.LockOSThread() in the AroundNode to ensure that the node runs on a single thread). + +Since AroundNode allows you to modify the context you can also use AroundNode to implement shared setup that attaches values to the context. You must return a context that inherits from the passed in context. + +If applied to a container, AroundNode will run before every node in the container. Including setup nodes like BeforeEach and DeferCleanup. + +AroundNode can also be applied to RunSpecs to run before every node in the suite. 
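For example, a minimal suite-level sketch (the TestMySuite name and suite description are illustrative, and the usual "testing" and "runtime" imports are assumed):

	func TestMySuite(t *testing.T) {
		RunSpecs(t, "My Suite", AroundNode(func() {
			// runs in the same goroutine as each node; locking here keeps
			// that node on a single OS thread (see the note above)
			runtime.LockOSThread()
		}))
	}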
+*/ +func AroundNode[F types.AroundNodeAllowedFuncs](f F) types.AroundNodeDecorator { + return types.AroundNode(f, types.NewCodeLocation(1)) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go index 2b36b2fe..3021dfec 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -29,7 +29,6 @@ func BuildBuildCommand() command.Command { var errors []error cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) command.AbortIfErrors("Ginkgo detected configuration issues:", errors) - buildSpecs(args, cliConfig, goFlagsConfig) }, } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go index a34d9435..75cbdb49 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -2,12 +2,9 @@ package watch import ( "go/build" - "regexp" + "strings" ) -var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) -var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing - type Dependencies struct { deps map[string]int } @@ -78,7 +75,7 @@ func (d Dependencies) resolveAndAdd(deps []string, depth int) { if err != nil { continue } - if !pkg.Goroot && (!ginkgoAndGomegaFilter.MatchString(pkg.Dir) || ginkgoIntegrationTestFilter.MatchString(pkg.Dir)) { + if !pkg.Goroot && (!matchesGinkgoOrGomega(pkg.Dir) || matchesGinkgoIntegration(pkg.Dir)) { d.addDepIfNotPresent(pkg.Dir, depth) } } @@ -90,3 +87,11 @@ func (d Dependencies) addDepIfNotPresent(dep string, depth int) { d.deps[dep] = depth } } + +func matchesGinkgoOrGomega(s string) bool { + return strings.Contains(s, "github.com/onsi/ginkgo") || strings.Contains(s, "github.com/onsi/gomega") +} + +func matchesGinkgoIntegration(s string) bool { + return strings.Contains(s, "github.com/onsi/ginkgo/integration") // allow us to integration test this thing +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 993279de..cabf2814 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -2,6 +2,7 @@ package ginkgo import ( "context" + "io" "testing" "github.com/onsi/ginkgo/v2/internal/testingtproxy" @@ -69,6 +70,8 @@ type GinkgoTInterface interface { Skipf(format string, args ...any) Skipped() bool TempDir() string + Attr(key, value string) + Output() io.Writer } /* diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go b/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go new file mode 100644 index 00000000..c9657102 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/around_node.go @@ -0,0 +1,34 @@ +package internal + +import ( + "github.com/onsi/ginkgo/v2/types" +) + +func ComputeAroundNodes(specs Specs) Specs { + out := Specs{} + for _, spec := range specs { + nodes := Nodes{} + currentNestingLevel := 0 + aroundNodes := types.AroundNodes{} + nestingLevelIndices := []int{} + for _, node := range spec.Nodes { + switch node.NodeType { + case types.NodeTypeContainer: + currentNestingLevel = node.NestingLevel + 1 + nestingLevelIndices = append(nestingLevelIndices, len(aroundNodes)) + aroundNodes = 
aroundNodes.Append(node.AroundNodes...) + nodes = append(nodes, node) + default: + if currentNestingLevel > node.NestingLevel { + currentNestingLevel = node.NestingLevel + aroundNodes = aroundNodes[:nestingLevelIndices[currentNestingLevel]] + } + node.AroundNodes = types.AroundNodes{}.Append(aroundNodes...).Append(node.AroundNodes...) + nodes = append(nodes, node) + } + } + spec.Nodes = nodes + out = append(out, spec) + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go index e3da7d14..a39daf5a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/focus.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/focus.go @@ -56,7 +56,7 @@ This function sets the `Skip` property on specs by applying Ginkgo's focus polic *Note:* specs with pending nodes are Skipped when created by NewSpec. */ -func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteConfig types.SuiteConfig) (Specs, bool) { +func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteConfig types.SuiteConfig) (Specs, bool) { focusString := strings.Join(suiteConfig.FocusStrings, "|") skipString := strings.Join(suiteConfig.SkipStrings, "|") @@ -84,6 +84,13 @@ func ApplyFocusToSpecs(specs Specs, description string, suiteLabels Labels, suit }) } + if suiteConfig.SemVerFilter != "" { + semVerFilter, _ := types.ParseSemVerFilter(suiteConfig.SemVerFilter) + skipChecks = append(skipChecks, func(spec Spec) bool { + return !semVerFilter(UnionOfSemVerConstraints(suiteSemVerConstraints, spec.Nodes.UnionOfSemVerConstraints())) + }) + } + if len(suiteConfig.FocusFiles) > 0 { focusFilters, _ := types.ParseFileFilters(suiteConfig.FocusFiles) skipChecks = append(skipChecks, func(spec Spec) bool { return !focusFilters.Matches(spec.Nodes.CodeLocations()) }) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go index 02c9fe4f..b88fe206 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/group.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -112,19 +112,21 @@ func newGroup(suite *Suite) *group { func (g *group) initialReportForSpec(spec Spec) types.SpecReport { return types.SpecReport{ - ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), - ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), - ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), - LeafNodeLocation: spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, - LeafNodeType: types.NodeTypeIt, - LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, - LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), - ParallelProcess: g.suite.config.ParallelProcess, - RunningInParallel: g.suite.isRunningInParallel(), - IsSerial: spec.Nodes.HasNodeMarkedSerial(), - IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), - MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), - MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), + ContainerHierarchyTexts: spec.Nodes.WithType(types.NodeTypeContainer).Texts(), + ContainerHierarchyLocations: spec.Nodes.WithType(types.NodeTypeContainer).CodeLocations(), + ContainerHierarchyLabels: spec.Nodes.WithType(types.NodeTypeContainer).Labels(), + ContainerHierarchySemVerConstraints: spec.Nodes.WithType(types.NodeTypeContainer).SemVerConstraints(), + LeafNodeLocation: 
spec.FirstNodeWithType(types.NodeTypeIt).CodeLocation, + LeafNodeType: types.NodeTypeIt, + LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, + LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), + LeafNodeSemVerConstraints: []string(spec.FirstNodeWithType(types.NodeTypeIt).SemVerConstraints), + ParallelProcess: g.suite.config.ParallelProcess, + RunningInParallel: g.suite.isRunningInParallel(), + IsSerial: spec.Nodes.HasNodeMarkedSerial(), + IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), + MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), + MaxMustPassRepeatedly: spec.Nodes.GetMaxMustPassRepeatedly(), } } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 8096950b..647368fe 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -55,11 +55,13 @@ type Node struct { FlakeAttempts int MustPassRepeatedly int Labels Labels + SemVerConstraints SemVerConstraints PollProgressAfter time.Duration PollProgressInterval time.Duration NodeTimeout time.Duration SpecTimeout time.Duration GracePeriod time.Duration + AroundNodes types.AroundNodes NodeIDWhereCleanupWasGenerated uint } @@ -85,31 +87,46 @@ type FlakeAttempts uint type MustPassRepeatedly uint type Offset uint type Done chan<- any // Deprecated Done Channel for asynchronous testing -type Labels []string type PollProgressInterval time.Duration type PollProgressAfter time.Duration type NodeTimeout time.Duration type SpecTimeout time.Duration type GracePeriod time.Duration +type Labels []string + func (l Labels) MatchesLabelFilter(query string) bool { return types.MustParseLabelFilter(query)(l) } -func UnionOfLabels(labels ...Labels) Labels { - out := Labels{} - seen := map[string]bool{} - for _, labelSet := range labels { - for _, label := range labelSet { - if !seen[label] { - seen[label] = true - out = append(out, label) +type SemVerConstraints []string + +func (svc SemVerConstraints) MatchesSemVerFilter(version string) bool { + return types.MustParseSemVerFilter(version)(svc) +} + +func unionOf[S ~[]E, E comparable](slices ...S) S { + out := S{} + seen := map[E]bool{} + for _, slice := range slices { + for _, item := range slice { + if !seen[item] { + seen[item] = true + out = append(out, item) } } } return out } +func UnionOfLabels(labels ...Labels) Labels { + return unionOf(labels...) +} + +func UnionOfSemVerConstraints(semVerConstraints ...SemVerConstraints) SemVerConstraints { + return unionOf(semVerConstraints...) 
+} + func PartitionDecorations(args ...any) ([]any, []any) { decorations := []any{} remainingArgs := []any{} @@ -151,6 +168,8 @@ func isDecoration(arg any) bool { return true case t == reflect.TypeOf(Labels{}): return true + case t == reflect.TypeOf(SemVerConstraints{}): + return true case t == reflect.TypeOf(PollProgressInterval(0)): return true case t == reflect.TypeOf(PollProgressAfter(0)): @@ -161,6 +180,8 @@ func isDecoration(arg any) bool { return true case t == reflect.TypeOf(GracePeriod(0)): return true + case t == reflect.TypeOf(types.AroundNodeDecorator{}): + return true case t.Kind() == reflect.Slice && isSliceOfDecorations(arg): return true default: @@ -191,6 +212,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy NodeType: nodeType, Text: text, Labels: Labels{}, + SemVerConstraints: SemVerConstraints{}, CodeLocation: types.NewCodeLocation(baseOffset), NestingLevel: -1, PollProgressAfter: -1, @@ -221,6 +243,7 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy } labelsSeen := map[string]bool{} + semVerConstraintsSeen := map[string]bool{} trackedFunctionError := false args = remainingArgs remainingArgs = []any{} @@ -299,6 +322,8 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy if nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "GracePeriod")) } + case t == reflect.TypeOf(types.AroundNodeDecorator{}): + node.AroundNodes = append(node.AroundNodes, arg.(types.AroundNodeDecorator)) case t == reflect.TypeOf(Labels{}): if !nodeType.Is(types.NodeTypesForContainerAndIt) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Label")) @@ -311,6 +336,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy appendError(err) } } + case t == reflect.TypeOf(SemVerConstraints{}): + if !nodeType.Is(types.NodeTypesForContainerAndIt) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SemVerConstraint")) + } + for _, semVerConstraint := range arg.(SemVerConstraints) { + if !semVerConstraintsSeen[semVerConstraint] { + semVerConstraintsSeen[semVerConstraint] = true + semVerConstraint, err := types.ValidateAndCleanupSemVerConstraint(semVerConstraint, node.CodeLocation) + node.SemVerConstraints = append(node.SemVerConstraints, semVerConstraint) + appendError(err) + } + } case t.Kind() == reflect.Func: if nodeType.Is(types.NodeTypeContainer) { if node.Body != nil { @@ -824,6 +861,32 @@ func (n Nodes) UnionOfLabels() []string { return out } +func (n Nodes) SemVerConstraints() [][]string { + out := make([][]string, len(n)) + for i := range n { + if n[i].SemVerConstraints == nil { + out[i] = []string{} + } else { + out[i] = []string(n[i].SemVerConstraints) + } + } + return out +} + +func (n Nodes) UnionOfSemVerConstraints() []string { + out := []string{} + seen := map[string]bool{} + for i := range n { + for _, constraint := range n[i].SemVerConstraints { + if !seen[constraint] { + seen[constraint] = true + out = append(out, constraint) + } + } + } + return out +} + func (n Nodes) CodeLocations() []types.CodeLocation { out := make([]types.CodeLocation, len(n)) for i := range n { @@ -928,7 +991,7 @@ func unrollInterfaceSlice(args any) []any { out := []any{} for i := 0; i < v.Len(); i++ { el := reflect.ValueOf(v.Index(i).Interface()) - if el.Kind() == reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) { + if el.Kind() == 
reflect.Slice && el.Type() != reflect.TypeOf(Labels{}) && el.Type() != reflect.TypeOf(SemVerConstraints{}) { out = append(out, unrollInterfaceSlice(el.Interface())...) } else { out = append(out, v.Index(i).Interface()) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go index 11269cf1..165cbc4b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go @@ -236,7 +236,7 @@ func extractRunningGoroutines() ([]types.Goroutine, error) { } functionCall.Filename = line[:delimiterIdx] line = strings.Split(line[delimiterIdx+1:], " ")[0] - lineNumber, err := strconv.ParseInt(line, 10, 64) + lineNumber, err := strconv.ParseInt(line, 10, 32) functionCall.Line = int(lineNumber) if err != nil { return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call line number: %s\n%s", line, err.Error())) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 2d2ea2fc..99c9c5f5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -2,6 +2,7 @@ package internal import ( "context" + "reflect" "github.com/onsi/ginkgo/v2/types" ) @@ -11,6 +12,7 @@ type SpecContext interface { SpecReport() types.SpecReport AttachProgressReporter(func() string) func() + WrappedContext() context.Context } type specContext struct { @@ -45,3 +47,28 @@ func NewSpecContext(suite *Suite) *specContext { func (sc *specContext) SpecReport() types.SpecReport { return sc.suite.CurrentSpecReport() } + +func (sc *specContext) WrappedContext() context.Context { + return sc.Context +} + +/* +The user is allowed to wrap `SpecContext` in a new context.Context when using AroundNodes. But body functions expect SpecContext. +We support this by taking their context.Context and returning a SpecContext that wraps it. 
+*/ +func wrapContextChain(ctx context.Context) SpecContext { + if ctx == nil { + return nil + } + if reflect.TypeOf(ctx) == reflect.TypeOf(&specContext{}) { + return ctx.(*specContext) + } else if sc, ok := ctx.Value("GINKGO_SPEC_CONTEXT").(*specContext); ok { + return &specContext{ + Context: ctx, + ProgressReporterManager: sc.ProgressReporterManager, + cancel: sc.cancel, + suite: sc.suite, + } + } + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 3edf5077..14a0688f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -32,6 +32,7 @@ type Suite struct { suiteNodes Nodes cleanupNodes Nodes + aroundNodes types.AroundNodes failer *Failer reporter reporters.Reporter @@ -87,6 +88,7 @@ func (suite *Suite) Clone() (*Suite, error) { ProgressReporterManager: NewProgressReporterManager(), topLevelContainers: suite.topLevelContainers.Clone(), suiteNodes: suite.suiteNodes.Clone(), + aroundNodes: suite.aroundNodes.Clone(), selectiveLock: &sync.Mutex{}, }, nil } @@ -104,13 +106,14 @@ func (suite *Suite) BuildTree() error { return nil } -func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { +func (suite *Suite) Run(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suiteAroundNodes types.AroundNodes, suitePath string, failer *Failer, reporter reporters.Reporter, writer WriterInterface, outputInterceptor OutputInterceptor, interruptHandler interrupt_handler.InterruptHandlerInterface, client parallel_support.Client, progressSignalRegistrar ProgressSignalRegistrar, suiteConfig types.SuiteConfig) (bool, bool) { if suite.phase != PhaseBuildTree { panic("cannot run before building the tree = call suite.BuildTree() first") } ApplyNestedFocusPolicyToTree(suite.tree) specs := GenerateSpecsFromTreeRoot(suite.tree) - specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteConfig) + specs, hasProgrammaticFocus := ApplyFocusToSpecs(specs, description, suiteLabels, suiteSemVerConstraints, suiteConfig) + specs = ComputeAroundNodes(specs) suite.phase = PhaseRun suite.client = client @@ -120,6 +123,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string suite.outputInterceptor = outputInterceptor suite.interruptHandler = interruptHandler suite.config = suiteConfig + suite.aroundNodes = suiteAroundNodes if suite.config.Timeout > 0 { suite.deadline = time.Now().Add(suite.config.Timeout) @@ -127,7 +131,7 @@ func (suite *Suite) Run(description string, suiteLabels Labels, suitePath string cancelProgressHandler := progressSignalRegistrar(suite.handleProgressSignal) - success := suite.runSpecs(description, suiteLabels, suitePath, hasProgrammaticFocus, specs) + success := suite.runSpecs(description, suiteLabels, suiteSemVerConstraints, suitePath, hasProgrammaticFocus, specs) cancelProgressHandler() @@ -259,6 +263,7 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeIDWhereCleanupWasGenerated = suite.currentNode.ID node.NestingLevel = suite.currentNode.NestingLevel + node.AroundNodes = 
types.AroundNodes{}.Append(suite.currentNode.AroundNodes...).Append(node.AroundNodes...) suite.selectiveLock.Lock() suite.cleanupNodes = append(suite.cleanupNodes, node) suite.selectiveLock.Unlock() @@ -428,13 +433,14 @@ func (suite *Suite) processCurrentSpecReport() { } } -func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { +func (suite *Suite) runSpecs(description string, suiteLabels Labels, suiteSemVerConstraints SemVerConstraints, suitePath string, hasProgrammaticFocus bool, specs Specs) bool { numSpecsThatWillBeRun := specs.CountWithoutSkip() suite.report = types.Report{ SuitePath: suitePath, SuiteDescription: description, SuiteLabels: suiteLabels, + SuiteSemVerConstraints: suiteSemVerConstraints, SuiteConfig: suite.config, SuiteHasProgrammaticFocus: hasProgrammaticFocus, PreRunStats: types.PreRunStats{ @@ -891,7 +897,30 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ failureC <- failureFromRun }() - node.Body(sc) + aroundNodes := types.AroundNodes{}.Append(suite.aroundNodes...).Append(node.AroundNodes...) + if len(aroundNodes) > 0 { + i := 0 + var f func(context.Context) + f = func(c context.Context) { + sc := wrapContextChain(c) + if sc == nil { + suite.failer.Fail("An AroundNode failed to pass a valid Ginkgo SpecContext in. You must always pass in a context derived from the context passed to you.", aroundNodes[i].CodeLocation) + return + } + i++ + if i < len(aroundNodes) { + aroundNodes[i].Body(sc, f) + } else { + node.Body(sc) + } + } + aroundNodes[0].Body(sc, f) + if i != len(aroundNodes) { + suite.failer.Fail("An AroundNode failed to call the passed in function.", aroundNodes[i].CodeLocation) + } + } else { + node.Body(sc) + } finished = true }() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index b4ecc7cb..9806e315 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -229,3 +229,9 @@ func (t *ginkgoTestingTProxy) ParallelTotal() int { func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() { return t.attachProgressReporter(f) } +func (t *ginkgoTestingTProxy) Output() io.Writer { + return t.writer +} +func (t *ginkgoTestingTProxy) Attr(key, value string) { + t.addReportEntry(key, value, internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index 74ad0768..8c3714b8 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -72,6 +72,9 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { if len(report.SuiteLabels) > 0 { r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteLabels, ", "))) } + if len(report.SuiteSemVerConstraints) > 0 { + r.emit(r.f("{{coral}}[%s]{{/}} ", strings.Join(report.SuiteSemVerConstraints, ", "))) + } r.emit(r.f("- %d/%d specs ", report.PreRunStats.SpecsThatWillRun, report.PreRunStats.TotalSpecs)) if report.SuiteConfig.ParallelTotal > 1 { r.emit(r.f("- %d procs ", report.SuiteConfig.ParallelTotal)) @@ -87,6 +90,13 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { bannerWidth = len(labels) + 2 } } + if 
len(report.SuiteSemVerConstraints) > 0 { + semVerConstraints := strings.Join(report.SuiteSemVerConstraints, ", ") + r.emitBlock(r.f("{{coral}}[%s]{{/}} ", semVerConstraints)) + if len(semVerConstraints)+2 > bannerWidth { + bannerWidth = len(semVerConstraints) + 2 + } + } r.emitBlock(strings.Repeat("=", bannerWidth)) out := r.f("Random Seed: {{bold}}%d{{/}}", report.SuiteConfig.RandomSeed) @@ -394,7 +404,7 @@ func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, tim case types.ReportEntry: r.emitReportEntry(indent, x) case types.ProgressReport: - r.emitProgressReport(indent, false, x) + r.emitProgressReport(indent, false, false, x) case types.SpecEvent: if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { r.emitSpecEvent(indent, x, isVeryVerbose) @@ -448,7 +458,7 @@ func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failur if !failure.ProgressReport.IsZero() { r.emitBlock("\n") - r.emitProgressReport(indent, false, failure.ProgressReport) + r.emitProgressReport(indent, false, false, failure.ProgressReport) } if failure.AdditionalFailure != nil && includeAdditionalFailure { @@ -464,11 +474,11 @@ func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) } shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) - r.emitProgressReport(1, shouldEmitGW, report) + r.emitProgressReport(1, shouldEmitGW, true, report) r.emitDelimiter(1) } -func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { +func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput, emitGroup bool, report types.ProgressReport) { if report.Message != "" { r.emitBlock(r.fi(indent, report.Message+"\n")) indent += 1 @@ -504,6 +514,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput indent -= 1 } + if r.conf.GithubOutput && emitGroup { + r.emitBlock(r.fi(indent, "::group::Progress Report")) + } + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { r.emit("\n") r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) @@ -550,6 +564,10 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput r.emit(r.fi(indent, "{{gray}}{{bold}}{{underline}}Other Goroutines{{/}}\n")) r.emitGoroutines(indent, otherGoroutines...) } + + if r.conf.GithubOutput && emitGroup { + r.emitBlock(r.fi(indent, "::endgroup::")) + } } func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { @@ -698,8 +716,8 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { } func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { - texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} - texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) 
+ texts, locations, labels, semVerConstraints := []string{}, []types.CodeLocation{}, [][]string{}, [][]string{} + texts, locations, labels, semVerConstraints = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...), append(semVerConstraints, report.ContainerHierarchySemVerConstraints...) if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) @@ -707,6 +725,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo texts = append(texts, r.f(report.LeafNodeText)) } labels = append(labels, report.LeafNodeLabels) + semVerConstraints = append(semVerConstraints, report.LeafNodeSemVerConstraints) locations = append(locations, report.LeafNodeLocation) failureLocation := report.Failure.FailureNodeLocation @@ -720,6 +739,7 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) locations = append([]types.CodeLocation{failureLocation}, locations...) labels = append([][]string{{}}, labels...) + semVerConstraints = append([][]string{{}}, semVerConstraints...) highlightIndex = 0 case types.FailureNodeInContainer: i := report.Failure.FailureNodeContainerIndex @@ -747,6 +767,9 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if len(labels[i]) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) } + if len(semVerConstraints[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(semVerConstraints[i], ", ")) + } out += "\n" out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) } @@ -770,6 +793,10 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if len(flattenedLabels) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) } + flattenedSemVerConstraints := report.SemVerConstraints() + if len(flattenedSemVerConstraints) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedSemVerConstraints, ", ")) + } out += "\n" if usePreciseFailureLocation { out += r.f("{{gray}}%s{{/}}", failureLocation) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 562e0f62..828f893f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -36,6 +36,9 @@ type JunitReportConfig struct { // Enable OmitSpecLabels to prevent labels from appearing in the spec name OmitSpecLabels bool + // Enable OmitSpecSemVerConstraints to prevent semantic version constraints from appearing in the spec name + OmitSpecSemVerConstraints bool + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name OmitLeafNodeType bool @@ -169,9 +172,11 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit {"SuiteHasProgrammaticFocus", fmt.Sprintf("%t", report.SuiteHasProgrammaticFocus)}, {"SpecialSuiteFailureReason", strings.Join(report.SpecialSuiteFailureReasons, ",")}, {"SuiteLabels", fmt.Sprintf("[%s]", strings.Join(report.SuiteLabels, ","))}, + {"SuiteSemVerConstraints", fmt.Sprintf("[%s]", strings.Join(report.SuiteSemVerConstraints, ","))}, {"RandomSeed", fmt.Sprintf("%d", report.SuiteConfig.RandomSeed)}, {"RandomizeAllSpecs", fmt.Sprintf("%t", 
report.SuiteConfig.RandomizeAllSpecs)}, {"LabelFilter", report.SuiteConfig.LabelFilter}, + {"SemVerFilter", report.SuiteConfig.SemVerFilter}, {"FocusStrings", strings.Join(report.SuiteConfig.FocusStrings, ",")}, {"SkipStrings", strings.Join(report.SuiteConfig.SkipStrings, ",")}, {"FocusFiles", strings.Join(report.SuiteConfig.FocusFiles, ";")}, @@ -207,6 +212,10 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit owner = matches[1] } } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 && !config.OmitSpecSemVerConstraints { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = strings.TrimSpace(name) test := JUnitTestCase{ diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index e990ad82..55e1d1f4 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -38,9 +38,13 @@ func GenerateTeamcityReport(report types.Report, dst string) error { name := report.SuiteDescription labels := report.SuiteLabels + semVerConstraints := report.SuiteSemVerConstraints if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } fmt.Fprintf(f, "##teamcity[testSuiteStarted name='%s']\n", tcEscape(name)) for _, spec := range report.SpecReports { name := fmt.Sprintf("[%s]", spec.LeafNodeType) @@ -51,6 +55,10 @@ func GenerateTeamcityReport(report types.Report, dst string) error { if len(labels) > 0 { name = name + " [" + strings.Join(labels, ", ") + "]" } + semVerConstraints := spec.SemVerConstraints() + if len(semVerConstraints) > 0 { + name = name + " [" + strings.Join(semVerConstraints, ", ") + "]" + } name = tcEscape(name) fmt.Fprintf(f, "##teamcity[testStarted name='%s']\n", name) diff --git a/vendor/github.com/onsi/ginkgo/v2/types/around_node.go b/vendor/github.com/onsi/ginkgo/v2/types/around_node.go new file mode 100644 index 00000000..a069e062 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/around_node.go @@ -0,0 +1,56 @@ +package types + +import ( + "context" +) + +type AroundNodeAllowedFuncs interface { + ~func(context.Context, func(context.Context)) | ~func(context.Context) context.Context | ~func() +} +type AroundNodeFunc func(ctx context.Context, body func(ctx context.Context)) + +func AroundNode[F AroundNodeAllowedFuncs](f F, cl CodeLocation) AroundNodeDecorator { + if f == nil { + panic("BuildAroundNode cannot be called with a nil function.") + } + var aroundNodeFunc func(context.Context, func(context.Context)) + switch x := any(f).(type) { + case func(context.Context, func(context.Context)): + aroundNodeFunc = x + case func(context.Context) context.Context: + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + ctx = x(ctx) + body(ctx) + } + case func(): + aroundNodeFunc = func(ctx context.Context, body func(context.Context)) { + x() + body(ctx) + } + } + + return AroundNodeDecorator{ + Body: aroundNodeFunc, + CodeLocation: cl, + } +} + +type AroundNodeDecorator struct { + Body AroundNodeFunc + CodeLocation CodeLocation +} + +type AroundNodes []AroundNodeDecorator + +func (an AroundNodes) Clone() AroundNodes { + out := make(AroundNodes, len(an)) + copy(out, an) + return out +} + +func (an AroundNodes) Append(other ...AroundNodeDecorator) AroundNodes { + out := make(AroundNodes, 
len(an)+len(other)) + copy(out, an) + copy(out[len(an):], other) + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 2e827efe..b99a9e15 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -24,6 +24,7 @@ type SuiteConfig struct { FocusFiles []string SkipFiles []string LabelFilter string + SemVerFilter string FailOnPending bool FailOnEmpty bool FailFast bool @@ -308,6 +309,8 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.LabelFilter", Name: "label-filter", SectionKey: "filter", UsageArgument: "expression", Usage: "If set, ginkgo will only run specs with labels that match the label-filter. The passed-in expression can include boolean operations (!, &&, ||, ','), groupings via '()', and regular expressions '/regexp/'. e.g. '(cat || dog) && !fruit'"}, + {KeyPath: "S.SemVerFilter", Name: "sem-ver-filter", SectionKey: "filter", UsageArgument: "version", + Usage: "If set, ginkgo will only run specs with semantic version constraints that are satisfied by the provided version. e.g. '2.1.0'"}, {KeyPath: "S.FocusStrings", Name: "focus", SectionKey: "filter", Usage: "If set, ginkgo will only run specs that match this regular expression. Can be specified multiple times, values are ORed."}, {KeyPath: "S.SkipStrings", Name: "skip", SectionKey: "filter", @@ -443,6 +446,13 @@ func VetConfig(flagSet GinkgoFlagSet, suiteConfig SuiteConfig, reporterConfig Re } } + if suiteConfig.SemVerFilter != "" { + _, err := ParseSemVerFilter(suiteConfig.SemVerFilter) + if err != nil { + errors = append(errors, err) + } + } + switch strings.ToLower(suiteConfig.OutputInterceptorMode) { case "", "dup", "swap", "none": default: @@ -573,6 +583,9 @@ var GoBuildFlags = GinkgoFlags{ Usage: "print the name of the temporary work directory and do not delete it when exiting."}, {KeyPath: "Go.X", Name: "x", SectionKey: "go-build", Usage: "print the commands."}, +} + +var GoBuildOFlags = GinkgoFlags{ {KeyPath: "Go.O", Name: "o", SectionKey: "go-build", Usage: "output binary path (including name)."}, } @@ -673,7 +686,7 @@ func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, packageToBuild strin args := []string{"test", "-c", packageToBuild} goArgs, err := GenerateFlagArgs( - GoBuildFlags, + GoBuildFlags.CopyAppend(GoBuildOFlags...), map[string]any{ "Go": &goFlagsConfig, }, @@ -763,6 +776,7 @@ func BuildWatchCommandFlagSet(suiteConfig *SuiteConfig, reporterConfig *Reporter func BuildBuildCommandFlagSet(cliConfig *CLIConfig, goFlagsConfig *GoFlagsConfig) (GinkgoFlagSet, error) { flags := GinkgoCLISharedFlags flags = flags.CopyAppend(GoBuildFlags...) + flags = flags.CopyAppend(GoBuildOFlags...) 
bindings := map[string]any{ "C": cliConfig, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index c2796b54..59313238 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -432,6 +432,24 @@ func (g ginkgoErrors) InvalidEmptyLabel(cl CodeLocation) error { } } +func (g ginkgoErrors) InvalidSemVerConstraint(semVerConstraint, errMsg string, cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid SemVerConstraint", + Message: fmt.Sprintf("'%s' is an invalid SemVerConstraint: %s", semVerConstraint, errMsg), + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + +func (g ginkgoErrors) InvalidEmptySemVerConstraint(cl CodeLocation) error { + return GinkgoError{ + Heading: "Invalid Empty SemVerConstraint", + Message: "SemVerConstraint cannot be empty", + CodeLocation: cl, + DocLink: "spec-semantic-version-filtering", + } +} + /* Table errors */ func (g ginkgoErrors) MultipleEntryBodyFunctionsForTable(cl CodeLocation) error { return GinkgoError{ diff --git a/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go new file mode 100644 index 00000000..3fc2ed14 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/types/semver_filter.go @@ -0,0 +1,60 @@ +package types + +import ( + "fmt" + + "github.com/Masterminds/semver/v3" +) + +type SemVerFilter func([]string) bool + +func MustParseSemVerFilter(input string) SemVerFilter { + filter, err := ParseSemVerFilter(input) + if err != nil { + panic(err) + } + return filter +} + +func ParseSemVerFilter(filterVersion string) (SemVerFilter, error) { + if filterVersion == "" { + return func(_ []string) bool { return true }, nil + } + + targetVersion, err := semver.NewVersion(filterVersion) + if err != nil { + return nil, fmt.Errorf("invalid filter version: %w", err) + } + + return func(constraints []string) bool { + // unconstrained specs always run + if len(constraints) == 0 { + return true + } + + for _, constraintStr := range constraints { + constraint, err := semver.NewConstraint(constraintStr) + if err != nil { + return false + } + + if !constraint.Check(targetVersion) { + return false + } + } + + return true + }, nil +} + +func ValidateAndCleanupSemVerConstraint(semVerConstraint string, cl CodeLocation) (string, error) { + if len(semVerConstraint) == 0 { + return "", GinkgoErrors.InvalidEmptySemVerConstraint(cl) + } + _, err := semver.NewConstraint(semVerConstraint) + if err != nil { + return "", GinkgoErrors.InvalidSemVerConstraint(semVerConstraint, err.Error(), cl) + } + + return semVerConstraint, nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index ddcbec1b..b8e864a5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "os" + "slices" "sort" "strings" "time" @@ -30,6 +31,9 @@ type Report struct { //SuiteLabels captures any labels attached to the suite by the DSL's RunSpecs() function SuiteLabels []string + //SuiteSemVerConstraints captures any semVerConstraints attached to the suite by the DSL's RunSpecs() function + SuiteSemVerConstraints []string + //SuiteSucceeded captures the success or failure status of the test run //If true, the test run is considered successful. 
//If false, the test run is considered unsuccessful @@ -129,13 +133,18 @@ type SpecReport struct { // all Describe/Context/When containers in this spec's hierarchy ContainerHierarchyLabels [][]string - // LeafNodeType, LeadNodeLocation, LeafNodeLabels and LeafNodeText capture the NodeType, CodeLocation, and text + // ContainerHierarchySemVerConstraints is a slice containing the semVerConstraints of + // all Describe/Context/When containers in this spec's hierarchy + ContainerHierarchySemVerConstraints [][]string + + // LeafNodeType, LeafNodeLocation, LeafNodeLabels, LeafNodeSemVerConstraints and LeafNodeText capture the NodeType, CodeLocation, and text // of the Ginkgo node being tested (typically an NodeTypeIt node, though this can also be // one of the NodeTypesForSuiteLevelNodes node types) - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string // State captures whether the spec has passed, failed, etc. State SpecState @@ -198,48 +207,52 @@ type SpecReport struct { func (report SpecReport) MarshalJSON() ([]byte, error) { //All this to avoid emitting an empty Failure struct in the JSON out := struct { - ContainerHierarchyTexts []string - ContainerHierarchyLocations []CodeLocation - ContainerHierarchyLabels [][]string - LeafNodeType NodeType - LeafNodeLocation CodeLocation - LeafNodeLabels []string - LeafNodeText string - State SpecState - StartTime time.Time - EndTime time.Time - RunTime time.Duration - ParallelProcess int - Failure *Failure `json:",omitempty"` - NumAttempts int - MaxFlakeAttempts int - MaxMustPassRepeatedly int - CapturedGinkgoWriterOutput string `json:",omitempty"` - CapturedStdOutErr string `json:",omitempty"` - ReportEntries ReportEntries `json:",omitempty"` - ProgressReports []ProgressReport `json:",omitempty"` - AdditionalFailures []AdditionalFailure `json:",omitempty"` - SpecEvents SpecEvents `json:",omitempty"` + ContainerHierarchyTexts []string + ContainerHierarchyLocations []CodeLocation + ContainerHierarchyLabels [][]string + ContainerHierarchySemVerConstraints [][]string + LeafNodeType NodeType + LeafNodeLocation CodeLocation + LeafNodeLabels []string + LeafNodeSemVerConstraints []string + LeafNodeText string + State SpecState + StartTime time.Time + EndTime time.Time + RunTime time.Duration + ParallelProcess int + Failure *Failure `json:",omitempty"` + NumAttempts int + MaxFlakeAttempts int + MaxMustPassRepeatedly int + CapturedGinkgoWriterOutput string `json:",omitempty"` + CapturedStdOutErr string `json:",omitempty"` + ReportEntries ReportEntries `json:",omitempty"` + ProgressReports []ProgressReport `json:",omitempty"` + AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` }{ - ContainerHierarchyTexts: report.ContainerHierarchyTexts, - ContainerHierarchyLocations: report.ContainerHierarchyLocations, - ContainerHierarchyLabels: report.ContainerHierarchyLabels, - LeafNodeType: report.LeafNodeType, - LeafNodeLocation: report.LeafNodeLocation, - LeafNodeLabels: report.LeafNodeLabels, - LeafNodeText: report.LeafNodeText, - State: report.State, - StartTime: report.StartTime, - EndTime: report.EndTime, - RunTime: report.RunTime, - ParallelProcess: report.ParallelProcess, - Failure: nil, - ReportEntries: nil, - NumAttempts: report.NumAttempts, - MaxFlakeAttempts: report.MaxFlakeAttempts, - MaxMustPassRepeatedly: 
report.MaxMustPassRepeatedly, - CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, - CapturedStdOutErr: report.CapturedStdOutErr, + ContainerHierarchyTexts: report.ContainerHierarchyTexts, + ContainerHierarchyLocations: report.ContainerHierarchyLocations, + ContainerHierarchyLabels: report.ContainerHierarchyLabels, + ContainerHierarchySemVerConstraints: report.ContainerHierarchySemVerConstraints, + LeafNodeType: report.LeafNodeType, + LeafNodeLocation: report.LeafNodeLocation, + LeafNodeLabels: report.LeafNodeLabels, + LeafNodeSemVerConstraints: report.LeafNodeSemVerConstraints, + LeafNodeText: report.LeafNodeText, + State: report.State, + StartTime: report.StartTime, + EndTime: report.EndTime, + RunTime: report.RunTime, + ParallelProcess: report.ParallelProcess, + Failure: nil, + ReportEntries: nil, + NumAttempts: report.NumAttempts, + MaxFlakeAttempts: report.MaxFlakeAttempts, + MaxMustPassRepeatedly: report.MaxMustPassRepeatedly, + CapturedGinkgoWriterOutput: report.CapturedGinkgoWriterOutput, + CapturedStdOutErr: report.CapturedStdOutErr, } if !report.Failure.IsZero() { @@ -287,6 +300,9 @@ func (report SpecReport) FullText() string { if report.LeafNodeText != "" { texts = append(texts, report.LeafNodeText) } + texts = slices.DeleteFunc(texts, func(t string) bool { + return t == "" + }) return strings.Join(texts, " ") } @@ -312,6 +328,28 @@ func (report SpecReport) Labels() []string { return out } +// SemVerConstraints returns a deduped set of all the spec's SemVerConstraints. +func (report SpecReport) SemVerConstraints() []string { + out := []string{} + seen := map[string]bool{} + for _, semVerConstraints := range report.ContainerHierarchySemVerConstraints { + for _, semVerConstraint := range semVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + } + for _, semVerConstraint := range report.LeafNodeSemVerConstraints { + if !seen[semVerConstraint] { + seen[semVerConstraint] = true + out = append(out, semVerConstraint) + } + } + + return out +} + // MatchesLabelFilter returns true if the spec satisfies the passed in label filter query func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { filter, err := ParseLabelFilter(query) @@ -321,6 +359,15 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { return filter(report.Labels()), nil } +// MatchesSemVerFilter returns true if the spec satisfies the passed in label filter query +func (report SpecReport) MatchesSemVerFilter(version string) (bool, error) { + filter, err := ParseSemVerFilter(version) + if err != nil { + return false, err + } + return filter(report.SemVerConstraints()), nil +} + // FileName() returns the name of the file containing the spec func (report SpecReport) FileName() string { return report.LeafNodeLocation.FileName diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 158ac2fd..6aca6efa 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.23.4" +const VERSION = "2.25.3" diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index de9c957c..b7d7309f 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,13 @@ +## 1.38.2 + +- roll back to go 1.23.0 [c404969] + +## 1.38.1 + +### Fixes + +Numerous minor fixes 
and dependency bumps + ## 1.38.0 ### Features diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 1f03e1f2..fdba34ee 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.38.0" +const GOMEGA_VERSION = "1.38.2" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index a3a646e4..4121505b 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -452,7 +452,7 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } else { var fgErr formattedGomegaError - if errors.As(actualErr, &fgErr) { + if errors.As(matcherErr, &fgErr) { message += fgErr.FormattedGomegaError() + "\n" } else { message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr) diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 532fc374..ce74eee4 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -2,6 +2,7 @@ package matchers import ( "bytes" + "errors" "fmt" "github.com/google/go-cmp/cmp" @@ -32,7 +33,7 @@ func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr if err, ok := r.(error); ok { matchErr = err } else if errMsg, ok := r.(string); ok { - matchErr = fmt.Errorf(errMsg) + matchErr = errors.New(errMsg) } } }() diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go index 95057c26..c3da9bd4 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -5,7 +5,7 @@ import ( "strings" "github.com/onsi/gomega/format" - "gopkg.in/yaml.v3" + "go.yaml.in/yaml/v3" ) type MatchYAMLMatcher struct { diff --git a/vendor/github.com/vmware-archive/yaml-patch/LICENSE b/vendor/github.com/vmware-archive/yaml-patch/LICENSE new file mode 100644 index 00000000..8dada3ed --- /dev/null +++ b/vendor/github.com/vmware-archive/yaml-patch/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/vmware-archive/yaml-patch/Makefile b/vendor/github.com/vmware-archive/yaml-patch/Makefile new file mode 100644 index 00000000..9df1fad9 --- /dev/null +++ b/vendor/github.com/vmware-archive/yaml-patch/Makefile @@ -0,0 +1,17 @@ +CGO_ENABLED=0 + +all: windows linux darwin + +linux: + GOOS=linux GOARCH=amd64 go build -o yaml_patch_linux cmd/yaml-patch/*.go + +windows: + GOOS=windows GOARCH=amd64 go build -o yaml_patch.exe cmd/yaml-patch/*.go + +darwin: + GOOS=darwin GOARCH=amd64 go build -o yaml_patch_darwin cmd/yaml-patch/*.go + +clean: + rm yaml_patch_linux + rm yaml_patch.exe + rm yaml_patch_darwin diff --git a/vendor/github.com/vmware-archive/yaml-patch/README.md b/vendor/github.com/vmware-archive/yaml-patch/README.md new file mode 100644 index 00000000..a782e639 --- /dev/null +++ b/vendor/github.com/vmware-archive/yaml-patch/README.md @@ -0,0 +1,86 @@ +# yaml-patch + +`yaml-patch` is a version of Evan Phoenix's +[json-patch](https://github.com/evanphx/json-patch), which is an implementation +of [JavaScript Object Notation (JSON) Patch](https://tools.ietf.org/html/rfc6902), +but for YAML. + + +## Installing + +`go get github.com/krishicks/yaml-patch` + +If you want to use the CLI: + +`go get github.com/krishicks/yaml-patch/cmd/yaml-patch` + +## API + +Given the following RFC6902-ish YAML document, `ops`: + +``` +--- +- op: add + path: /baz/waldo + value: fred +``` + +And the following YAML that is to be modified, `src`: + +``` +--- +foo: bar +baz: + quux: grault +``` + +Decode the ops file into a patch: + +``` +patch, err := yamlpatch.DecodePatch(ops) +// handle err +``` + +Then apply that patch to the document: + +``` +dst, err := patch.Apply(src) +// handle err + +// do something with dst +``` + +### Example + +``` +doc := []byte(`--- +foo: bar +baz: + quux: grault +`) + +ops := []byte(`--- +- op: add + path: /baz/waldo + value: fred +`) + +patch, err := yamlpatch.DecodePatch(ops) +if err != nil { + log.Fatalf("decoding patch failed: %s", err) +} + +bs, err := patch.Apply(doc) +if err != nil { + log.Fatalf("applying patch failed: %s", err) +} + +fmt.Println(string(bs)) +``` + +``` +baz: + quux: grault + waldo: fred +foo: bar +``` diff --git a/vendor/github.com/vmware-archive/yaml-patch/container.go b/vendor/github.com/vmware-archive/yaml-patch/container.go new file mode 100644 index 00000000..bdc22f14 --- /dev/null +++ b/vendor/github.com/vmware-archive/yaml-patch/container.go @@ -0,0 +1,167 @@ +package yamlpatch + +import ( + "fmt" + "strconv" + "strings" +) + +// Container is the interface for performing operations on Nodes +type Container interface { + Get(key string) (*Node, error) + Set(key string, val *Node) error + Add(key string, val *Node) error + Remove(key string) error +} + +type nodeMap map[interface{}]*Node + +func (n *nodeMap) Set(key string, val *Node) error { + (*n)[key] = val + return nil +} + +func (n *nodeMap) Add(key string, val *Node) error { + (*n)[key] = val + return nil +} + +func (n *nodeMap) Get(key string) (*Node, error) { + return (*n)[key], nil +} + +func (n *nodeMap) Remove(key string) error { + _, ok := (*n)[key] + if !ok { + return fmt.Errorf("Unable to remove nonexistent key: %s", key) + } + + delete(*n, key) + return nil +} + +type nodeSlice []*Node + +func (n *nodeSlice) Set(index string, val *Node) error { + i, err := strconv.Atoi(index) + if err != nil { + return err + } + + sz := len(*n) + if i+1 > sz { + sz = i + 1 + } + + ary := make([]*Node, sz) + + cur := *n + + copy(ary, cur) + + if i >= len(ary) { + return 
fmt.Errorf("Unable to access invalid index: %d", i) + } + + ary[i] = val + + *n = ary + return nil +} + +func (n *nodeSlice) Add(index string, val *Node) error { + if index == "-" { + *n = append(*n, val) + return nil + } + + i, err := strconv.Atoi(index) + if err != nil { + return err + } + + ary := make([]*Node, len(*n)+1) + + cur := *n + + copy(ary[0:i], cur[0:i]) + ary[i] = val + copy(ary[i+1:], cur[i:]) + + *n = ary + return nil +} + +func (n *nodeSlice) Get(index string) (*Node, error) { + i, err := strconv.Atoi(index) + if err != nil { + return nil, err + } + + if i >= 0 && i <= len(*n)-1 { + return (*n)[i], nil + } + + return nil, fmt.Errorf("Unable to access invalid index: %d", i) +} + +func (n *nodeSlice) Remove(index string) error { + i, err := strconv.Atoi(index) + if err != nil { + return err + } + + cur := *n + + if i >= len(cur) { + return fmt.Errorf("Unable to remove invalid index: %d", i) + } + + ary := make([]*Node, len(cur)-1) + + copy(ary[0:i], cur[0:i]) + copy(ary[i:], cur[i+1:]) + + *n = ary + return nil + +} + +func findContainer(c Container, path *OpPath) (Container, string, error) { + parts, key, err := path.Decompose() + if err != nil { + return nil, "", err + } + + foundContainer := c + + for _, part := range parts { + node, err := foundContainer.Get(decodePatchKey(part)) + if err != nil { + return nil, "", err + } + + if node == nil { + return nil, "", fmt.Errorf("path does not exist: %s", path) + } + + foundContainer = node.Container() + } + + return foundContainer, decodePatchKey(key), nil +} + +// From http://tools.ietf.org/html/rfc6901#section-4 : +// +// Evaluation of each reference token begins by decoding any escaped +// character sequence. This is performed by first transforming any +// occurrence of the sequence '~1' to '/', and then transforming any +// occurrence of the sequence '~0' to '~'. + +var ( + rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~") +) + +func decodePatchKey(k string) string { + return rfc6901Decoder.Replace(k) +} diff --git a/vendor/github.com/vmware-archive/yaml-patch/node.go b/vendor/github.com/vmware-archive/yaml-patch/node.go new file mode 100644 index 00000000..4837c8a9 --- /dev/null +++ b/vendor/github.com/vmware-archive/yaml-patch/node.go @@ -0,0 +1,83 @@ +package yamlpatch + +import "reflect" + +// Node holds a YAML document that has not yet been processed into a NodeMap or +// NodeSlice +type Node struct { + raw *interface{} + container Container +} + +// NewNode returns a new Node. 
It expects a pointer to an interface{} +func NewNode(raw *interface{}) *Node { + return &Node{ + raw: raw, + } +} + +// MarshalYAML implements yaml.Marshaler, and returns the correct interface{} +// to be marshaled +func (n *Node) MarshalYAML() (interface{}, error) { + if n.container != nil { + return n.container, nil + } + + return *n.raw, nil +} + +// UnmarshalYAML implements yaml.Unmarshaler +func (n *Node) UnmarshalYAML(unmarshal func(interface{}) error) error { + var data interface{} + + err := unmarshal(&data) + if err != nil { + return err + } + + n.raw = &data + return nil +} + +// Empty returns whether the raw value is nil +func (n *Node) Empty() bool { + return *n.raw == nil +} + +// Container returns the node as a Container +func (n *Node) Container() Container { + if n.container != nil { + return n.container + } + + switch rt := (*n.raw).(type) { + case []interface{}: + c := make(nodeSlice, len(rt)) + n.container = &c + + for i := range rt { + c[i] = NewNode(&rt[i]) + } + case map[interface{}]interface{}: + c := make(nodeMap, len(rt)) + n.container = &c + + for k := range rt { + v := rt[k] + c[k] = NewNode(&v) + } + } + + return n.container +} + +// Equal compares the values of the raw interfaces that the YAML was +// unmarshaled into +func (n *Node) Equal(other *Node) bool { + return reflect.DeepEqual(*n.raw, *other.raw) +} + +// Value returns the raw value of the node +func (n *Node) Value() interface{} { + return *n.raw +} diff --git a/vendor/github.com/vmware-archive/yaml-patch/operation.go b/vendor/github.com/vmware-archive/yaml-patch/operation.go new file mode 100644 index 00000000..69353c77 --- /dev/null +++ b/vendor/github.com/vmware-archive/yaml-patch/operation.go @@ -0,0 +1,181 @@ +package yamlpatch + +import ( + "errors" + "fmt" + "strings" +) + +// Op is a type alias +type Op string + +// Ops +const ( + opAdd Op = "add" + opRemove Op = "remove" + opReplace Op = "replace" + opMove Op = "move" + opCopy Op = "copy" + opTest Op = "test" +) + +// OpPath is an RFC6902 'pointer' +type OpPath string + +// Decompose returns the pointer's components: +// "/foo" => [], "foo" +// "/foo/1" => ["foo"], "1" +// "/foo/1/bar" => ["foo", "1"], "bar" +func (p *OpPath) Decompose() ([]string, string, error) { + path := string(*p) + + if !strings.HasPrefix(path, "/") { + return nil, "", fmt.Errorf("operation path is missing leading '/': %s", path) + } + + parts := strings.Split(path, "/")[1:] + + return parts[:len(parts)-1], parts[len(parts)-1], nil +} + +// ContainsExtendedSyntax returns whether the OpPath uses the "key=value" +// format, as in "/foo/name=bar", where /foo points at an array that contains +// an object with a key "name" that has a value "bar" +func (p *OpPath) ContainsExtendedSyntax() bool { + return strings.Contains(string(*p), "=") +} + +// String returns the OpPath as a string +func (p *OpPath) String() string { + return string(*p) +} + +// Operation is an RFC6902 'Operation' +// https://tools.ietf.org/html/rfc6902#section-4 +type Operation struct { + Op Op `yaml:"op,omitempty"` + Path OpPath `yaml:"path,omitempty"` + From OpPath `yaml:"from,omitempty"` + Value *Node `yaml:"value,omitempty"` +} + +// Perform executes the operation on the given container +func (o *Operation) Perform(c Container) error { + var err error + + switch o.Op { + case opAdd: + err = tryAdd(c, o) + case opRemove: + err = tryRemove(c, o) + case opReplace: + err = tryReplace(c, o) + case opMove: + err = tryMove(c, o) + case opCopy: + err = tryCopy(c, o) + case opTest: + err = tryTest(c, o) + 
default: + err = fmt.Errorf("Unexpected op: %s", o.Op) + } + + return err +} + +func tryAdd(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("yamlpatch add operation does not apply: doc is missing path: %s", op.Path) + } + + return con.Add(key, op.Value) +} + +func tryRemove(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("yamlpatch remove operation does not apply: doc is missing path: %s", op.Path) + } + + return con.Remove(key) +} + +func tryReplace(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("yamlpatch replace operation does not apply: doc is missing path: %s", op.Path) + } + + val, err := con.Get(key) + if val == nil || err != nil { + return fmt.Errorf("yamlpatch replace operation does not apply: doc is missing key: %s", op.Path) + } + + return con.Set(key, op.Value) +} + +func tryMove(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.From) + if err != nil { + return fmt.Errorf("yamlpatch move operation does not apply: doc is missing from path: %s", op.From) + } + + val, err := con.Get(key) + if err != nil { + return err + } + + err = con.Remove(key) + if err != nil { + return err + } + + con, key, err = findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("yamlpatch move operation does not apply: doc is missing destination path: %s", op.Path) + } + + return con.Set(key, val) +} + +func tryCopy(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.From) + if err != nil { + return fmt.Errorf("copy operation does not apply: doc is missing from path: %s", op.From) + } + + val, err := con.Get(key) + if err != nil { + return err + } + + con, key, err = findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("copy operation does not apply: doc is missing destination path: %s", op.Path) + } + + return con.Set(key, val) +} + +func tryTest(doc Container, op *Operation) error { + con, key, err := findContainer(doc, &op.Path) + if err != nil { + return fmt.Errorf("test operation does not apply: doc is missing from path: %s", op.From) + } + + val, err := con.Get(key) + if err != nil { + return err + } + + if op.Value.Empty() && val == nil { + return nil + } + + if op.Value.Equal(val) { + return nil + } + + return errors.New("test failed") +} diff --git a/vendor/github.com/vmware-archive/yaml-patch/patch.go b/vendor/github.com/vmware-archive/yaml-patch/patch.go new file mode 100644 index 00000000..910f39eb --- /dev/null +++ b/vendor/github.com/vmware-archive/yaml-patch/patch.go @@ -0,0 +1,60 @@ +package yamlpatch + +import ( + "fmt" + + yaml "gopkg.in/yaml.v2" +) + +// Patch is an ordered collection of operations. 
+type Patch []Operation + +// DecodePatch decodes the passed YAML document as if it were an RFC 6902 patch +func DecodePatch(bs []byte) (Patch, error) { + var p Patch + + err := yaml.Unmarshal(bs, &p) + if err != nil { + return nil, err + } + + return p, nil +} + +// Apply returns a YAML document that has been mutated per the patch +func (p Patch) Apply(doc []byte) ([]byte, error) { + var iface interface{} + err := yaml.Unmarshal(doc, &iface) + if err != nil { + return nil, fmt.Errorf("failed unmarshaling doc: %s\n\n%s", string(doc), err) + } + + var c Container + c = NewNode(&iface).Container() + + for _, op := range p { + pathfinder := NewPathFinder(c) + if op.Path.ContainsExtendedSyntax() { + paths := pathfinder.Find(string(op.Path)) + if paths == nil { + return nil, fmt.Errorf("could not expand pointer: %s", op.Path) + } + + for _, path := range paths { + newOp := op + newOp.Path = OpPath(path) + err = newOp.Perform(c) + if err != nil { + return nil, err + } + } + } else { + err = op.Perform(c) + if err != nil { + return nil, err + } + } + } + + return yaml.Marshal(c) +} diff --git a/vendor/github.com/vmware-archive/yaml-patch/pathfinder.go b/vendor/github.com/vmware-archive/yaml-patch/pathfinder.go new file mode 100644 index 00000000..06cfb133 --- /dev/null +++ b/vendor/github.com/vmware-archive/yaml-patch/pathfinder.go @@ -0,0 +1,109 @@ +package yamlpatch + +import ( + "fmt" + "strings" +) + +// PathFinder can be used to find RFC6902-standard paths given non-standard +// (key=value) pointer syntax +type PathFinder struct { + root Container +} + +// NewPathFinder takes an interface that represents a YAML document and returns +// a new PathFinder +func NewPathFinder(container Container) *PathFinder { + return &PathFinder{ + root: container, + } +} + +// Find expands the given path into all matching paths, returning the canonical +// versions of those matching paths +func (p *PathFinder) Find(path string) []string { + parts := strings.Split(path, "/") + + if parts[1] == "" { + return []string{"/"} + } + + routes := map[string]Container{ + "": p.root, + } + + for _, part := range parts[1:] { + routes = find(decodePatchKey(part), routes) + } + + var paths []string + for k := range routes { + paths = append(paths, k) + } + + return paths +} + +func find(part string, routes map[string]Container) map[string]Container { + matches := map[string]Container{} + + for prefix, container := range routes { + if part == "-" { + for k := range routes { + matches[fmt.Sprintf("%s/-", k)] = routes[k] + } + return matches + } + + if kv := strings.Split(part, "="); len(kv) == 2 { + if newMatches := findAll(prefix, kv[0], kv[1], container); len(newMatches) > 0 { + matches = newMatches + } + continue + } + + if node, err := container.Get(part); err == nil { + path := fmt.Sprintf("%s/%s", prefix, part) + if node == nil { + matches[path] = container + } else { + matches[path] = node.Container() + } + } + } + + return matches +} + +func findAll(prefix, findKey, findValue string, container Container) map[string]Container { + if container == nil { + return nil + } + + if v, err := container.Get(findKey); err == nil && v != nil { + if vs, ok := v.Value().(string); ok && vs == findValue { + return map[string]Container{ + prefix: container, + } + } + } + + matches := map[string]Container{} + + switch it := container.(type) { + case *nodeMap: + for k, v := range *it { + for route, match := range findAll(fmt.Sprintf("%s/%s", prefix, k), findKey, findValue, v.Container()) { + matches[route] = match + } + } + case 
*nodeSlice: + for i, v := range *it { + for route, match := range findAll(fmt.Sprintf("%s/%d", prefix, i), findKey, findValue, v.Container()) { + matches[route] = match + } + } + } + + return matches +} diff --git a/vendor/github.com/vmware-archive/yaml-patch/placeholder_wrapper.go b/vendor/github.com/vmware-archive/yaml-patch/placeholder_wrapper.go new file mode 100644 index 00000000..cbcc22a8 --- /dev/null +++ b/vendor/github.com/vmware-archive/yaml-patch/placeholder_wrapper.go @@ -0,0 +1,51 @@ +package yamlpatch + +import ( + "fmt" + "regexp" +) + +// PlaceholderWrapper can be used to wrap placeholders that make YAML invalid +// in single quotes to make otherwise valid YAML +type PlaceholderWrapper struct { + LeftSide string + RightSide string + unwrappedRegex *regexp.Regexp + wrappedRegex *regexp.Regexp +} + +// NewPlaceholderWrapper returns a new PlaceholderWrapper which knows how to +// wrap and unwrap the provided left and right sides of a placeholder, e.g. {{ +// and }} +func NewPlaceholderWrapper(left, right string) *PlaceholderWrapper { + escapedLeft := regexp.QuoteMeta(left) + escapedRight := regexp.QuoteMeta(right) + unwrappedRegex := regexp.MustCompile(`\s` + escapedLeft + `([^` + escapedRight + `]+)` + escapedRight) + wrappedRegex := regexp.MustCompile(`\s'` + escapedLeft + `([^` + escapedRight + `]+)` + escapedRight + `'`) + + return &PlaceholderWrapper{ + LeftSide: left, + RightSide: right, + unwrappedRegex: unwrappedRegex, + wrappedRegex: wrappedRegex, + } +} + +// Wrap the placeholder in single quotes to make it valid YAML +func (w *PlaceholderWrapper) Wrap(input []byte) []byte { + if !w.unwrappedRegex.Match(input) { + return input + } + + return w.unwrappedRegex.ReplaceAll(input, []byte(fmt.Sprintf(` '%s$1%s'`, w.LeftSide, w.RightSide))) +} + +// Unwrap the single quotes from the placeholder to make it invalid YAML +// (again) +func (w *PlaceholderWrapper) Unwrap(input []byte) []byte { + if !w.wrappedRegex.Match(input) { + return input + } + + return w.wrappedRegex.ReplaceAll(input, []byte(fmt.Sprintf(` %s$1%s`, w.LeftSide, w.RightSide))) +} diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 6c18ea23..ea5ae629 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -11,8 +11,6 @@ // requires Go 1.6 or later) // // See https://http2.github.io/ for more information on HTTP/2. -// -// See https://http2.golang.org/ for a test server running this code. 
package http2 // import "golang.org/x/net/http2" import ( diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 6ab02b6c..d1c8b264 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -349,6 +349,9 @@ struct ltchars { #define _HIDIOCGRAWPHYS HIDIOCGRAWPHYS(_HIDIOCGRAWPHYS_LEN) #define _HIDIOCGRAWUNIQ HIDIOCGRAWUNIQ(_HIDIOCGRAWUNIQ_LEN) +// Renamed in v6.16, commit c6d732c38f93 ("net: ethtool: remove duplicate defines for family info") +#define ETHTOOL_FAMILY_NAME ETHTOOL_GENL_NAME +#define ETHTOOL_FAMILY_VERSION ETHTOOL_GENL_VERSION ' includes_NetBSD=' diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 798f61ad..7838ca5d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -602,14 +602,9 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI return } -// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) const minIovec = 8 func Readv(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = readv(fd, iovecs) @@ -618,9 +613,6 @@ func Readv(fd int, iovs [][]byte) (n int, err error) { } func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) n, err = preadv(fd, iovecs, offset) @@ -629,10 +621,6 @@ func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) { } func Writev(fd int, iovs [][]byte) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -644,10 +632,6 @@ func Writev(fd int, iovs [][]byte) (n int, err error) { } func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) { - if !darwinKernelVersionMin(11, 0, 0) { - return 0, ENOSYS - } - iovecs := make([]Iovec, 0, minIovec) iovecs = appendBytes(iovecs, iovs) if raceenabled { @@ -707,45 +691,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) { } } -func darwinMajorMinPatch() (maj, min, patch int, err error) { - var un Utsname - err = Uname(&un) - if err != nil { - return - } - - var mmp [3]int - c := 0 -Loop: - for _, b := range un.Release[:] { - switch { - case b >= '0' && b <= '9': - mmp[c] = 10*mmp[c] + int(b-'0') - case b == '.': - c++ - if c > 2 { - return 0, 0, 0, ENOTSUP - } - case b == 0: - break Loop - default: - return 0, 0, 0, ENOTSUP - } - } - if c != 2 { - return 0, 0, 0, ENOTSUP - } - return mmp[0], mmp[1], mmp[2], nil -} - -func darwinKernelVersionMin(maj, min, patch int) bool { - actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch() - if err != nil { - return false - } - return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch) -} - +//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error) //sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) //sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux.go index 9e7a6c5a..b6db27d9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -328,6 +328,8 @@ const ( AUDIT_KERNEL = 0x7d0 AUDIT_KERNEL_OTHER = 0x524 AUDIT_KERN_MODULE = 0x532 + AUDIT_LANDLOCK_ACCESS = 0x58f + AUDIT_LANDLOCK_DOMAIN = 0x590 AUDIT_LAST_FEATURE = 0x1 AUDIT_LAST_KERN_ANOM_MSG = 0x707 AUDIT_LAST_USER_MSG = 0x4af @@ -492,6 +494,7 @@ const ( BPF_F_BEFORE = 0x8 BPF_F_ID = 0x20 BPF_F_NETFILTER_IP_DEFRAG = 0x1 + BPF_F_PREORDER = 0x40 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REDIRECT_FLAGS = 0x19 BPF_F_REPLACE = 0x4 @@ -528,6 +531,7 @@ const ( BPF_LDX = 0x1 BPF_LEN = 0x80 BPF_LL_OFF = -0x200000 + BPF_LOAD_ACQ = 0x100 BPF_LSH = 0x60 BPF_MAJOR_VERSION = 0x1 BPF_MAXINSNS = 0x1000 @@ -555,6 +559,7 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_ST = 0x2 + BPF_STORE_REL = 0x110 BPF_STX = 0x3 BPF_SUB = 0x10 BPF_TAG_SIZE = 0x8 @@ -844,9 +849,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2025-01-17)" + DM_VERSION_EXTRA = "-ioctl (2025-04-28)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x31 + DM_VERSION_MINOR = 0x32 DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -937,9 +942,6 @@ const ( EPOLL_CTL_MOD = 0x3 EPOLL_IOC_TYPE = 0x8a EROFS_SUPER_MAGIC_V1 = 0xe0f5e1e2 - ESP_V4_FLOW = 0xa - ESP_V6_FLOW = 0xc - ETHER_FLOW = 0x12 ETHTOOL_BUSINFO_LEN = 0x20 ETHTOOL_EROMVERS_LEN = 0x20 ETHTOOL_FAMILY_NAME = "ethtool" @@ -1213,6 +1215,7 @@ const ( FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2 FAN_EVENT_INFO_TYPE_ERROR = 0x5 FAN_EVENT_INFO_TYPE_FID = 0x1 + FAN_EVENT_INFO_TYPE_MNT = 0x7 FAN_EVENT_INFO_TYPE_NEW_DFID_NAME = 0xc FAN_EVENT_INFO_TYPE_OLD_DFID_NAME = 0xa FAN_EVENT_INFO_TYPE_PIDFD = 0x4 @@ -1231,9 +1234,12 @@ const ( FAN_MARK_IGNORED_SURV_MODIFY = 0x40 FAN_MARK_IGNORE_SURV = 0x440 FAN_MARK_INODE = 0x0 + FAN_MARK_MNTNS = 0x110 FAN_MARK_MOUNT = 0x10 FAN_MARK_ONLYDIR = 0x8 FAN_MARK_REMOVE = 0x2 + FAN_MNT_ATTACH = 0x1000000 + FAN_MNT_DETACH = 0x2000000 FAN_MODIFY = 0x2 FAN_MOVE = 0xc0 FAN_MOVED_FROM = 0x40 @@ -1255,6 +1261,7 @@ const ( FAN_REPORT_DIR_FID = 0x400 FAN_REPORT_FD_ERROR = 0x2000 FAN_REPORT_FID = 0x200 + FAN_REPORT_MNT = 0x4000 FAN_REPORT_NAME = 0x800 FAN_REPORT_PIDFD = 0x80 FAN_REPORT_TARGET_FID = 0x1000 @@ -1274,6 +1281,7 @@ const ( FIB_RULE_PERMANENT = 0x1 FIB_RULE_UNRESOLVED = 0x4 FIDEDUPERANGE = 0xc0189436 + FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED = 0x1 FSCRYPT_KEY_DESCRIPTOR_SIZE = 0x8 FSCRYPT_KEY_DESC_PREFIX = "fscrypt:" FSCRYPT_KEY_DESC_PREFIX_SIZE = 0x8 @@ -1582,7 +1590,6 @@ const ( IPV6_DONTFRAG = 0x3e IPV6_DROP_MEMBERSHIP = 0x15 IPV6_DSTOPTS = 0x3b - IPV6_FLOW = 0x11 IPV6_FREEBIND = 0x4e IPV6_HDRINCL = 0x24 IPV6_HOPLIMIT = 0x34 @@ -1633,7 +1640,6 @@ const ( IPV6_TRANSPARENT = 0x4b IPV6_UNICAST_HOPS = 0x10 IPV6_UNICAST_IF = 0x4c - IPV6_USER_FLOW = 0xe IPV6_V6ONLY = 0x1a IPV6_VERSION = 0x60 IPV6_VERSION_MASK = 0xf0 @@ -1695,7 +1701,6 @@ const ( IP_TTL = 0x2 IP_UNBLOCK_SOURCE = 0x25 IP_UNICAST_IF = 0x32 - IP_USER_FLOW = 0xd IP_XFRM_POLICY = 0x11 ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 @@ -1817,7 +1822,11 @@ const ( LANDLOCK_ACCESS_FS_WRITE_FILE = 0x2 LANDLOCK_ACCESS_NET_BIND_TCP = 0x1 LANDLOCK_ACCESS_NET_CONNECT_TCP = 0x2 + LANDLOCK_CREATE_RULESET_ERRATA = 0x2 LANDLOCK_CREATE_RULESET_VERSION = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_NEW_EXEC_ON = 0x2 + LANDLOCK_RESTRICT_SELF_LOG_SAME_EXEC_OFF = 0x1 + LANDLOCK_RESTRICT_SELF_LOG_SUBDOMAINS_OFF = 0x4 LANDLOCK_SCOPE_ABSTRACT_UNIX_SOCKET = 0x1 LANDLOCK_SCOPE_SIGNAL = 0x2 
LINUX_REBOOT_CMD_CAD_OFF = 0x0 @@ -2493,6 +2502,10 @@ const ( PR_FP_EXC_UND = 0x40000 PR_FP_MODE_FR = 0x1 PR_FP_MODE_FRE = 0x2 + PR_FUTEX_HASH = 0x4e + PR_FUTEX_HASH_GET_IMMUTABLE = 0x3 + PR_FUTEX_HASH_GET_SLOTS = 0x2 + PR_FUTEX_HASH_SET_SLOTS = 0x1 PR_GET_AUXV = 0x41555856 PR_GET_CHILD_SUBREAPER = 0x25 PR_GET_DUMPABLE = 0x3 @@ -2652,6 +2665,10 @@ const ( PR_TAGGED_ADDR_ENABLE = 0x1 PR_TASK_PERF_EVENTS_DISABLE = 0x1f PR_TASK_PERF_EVENTS_ENABLE = 0x20 + PR_TIMER_CREATE_RESTORE_IDS = 0x4d + PR_TIMER_CREATE_RESTORE_IDS_GET = 0x2 + PR_TIMER_CREATE_RESTORE_IDS_OFF = 0x0 + PR_TIMER_CREATE_RESTORE_IDS_ON = 0x1 PR_TIMING_STATISTICAL = 0x0 PR_TIMING_TIMESTAMP = 0x1 PR_TSC_ENABLE = 0x1 @@ -2732,6 +2749,7 @@ const ( PTRACE_SETREGSET = 0x4205 PTRACE_SETSIGINFO = 0x4203 PTRACE_SETSIGMASK = 0x420b + PTRACE_SET_SYSCALL_INFO = 0x4212 PTRACE_SET_SYSCALL_USER_DISPATCH_CONFIG = 0x4210 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 @@ -2982,6 +3000,7 @@ const ( RTPROT_NTK = 0xf RTPROT_OPENR = 0x63 RTPROT_OSPF = 0xbc + RTPROT_OVN = 0x54 RTPROT_RA = 0x9 RTPROT_REDIRECT = 0x1 RTPROT_RIP = 0xbd @@ -3336,7 +3355,7 @@ const ( TASKSTATS_GENL_NAME = "TASKSTATS" TASKSTATS_GENL_VERSION = 0x1 TASKSTATS_TYPE_MAX = 0x6 - TASKSTATS_VERSION = 0xf + TASKSTATS_VERSION = 0x10 TCIFLUSH = 0x0 TCIOFF = 0x2 TCIOFLUSH = 0x2 @@ -3406,8 +3425,6 @@ const ( TCP_TX_DELAY = 0x25 TCP_ULP = 0x1f TCP_USER_TIMEOUT = 0x12 - TCP_V4_FLOW = 0x1 - TCP_V6_FLOW = 0x5 TCP_WINDOW_CLAMP = 0xa TCP_ZEROCOPY_RECEIVE = 0x23 TFD_TIMER_ABSTIME = 0x1 @@ -3530,8 +3547,6 @@ const ( UDP_NO_CHECK6_RX = 0x66 UDP_NO_CHECK6_TX = 0x65 UDP_SEGMENT = 0x67 - UDP_V4_FLOW = 0x2 - UDP_V6_FLOW = 0x6 UMOUNT_NOFOLLOW = 0x8 USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff @@ -3574,7 +3589,7 @@ const ( WDIOS_TEMPPANIC = 0x4 WDIOS_UNKNOWN = -0x1 WEXITED = 0x4 - WGALLOWEDIP_A_MAX = 0x3 + WGALLOWEDIP_A_MAX = 0x4 WGDEVICE_A_MAX = 0x8 WGPEER_A_MAX = 0xa WG_CMD_MAX = 0x1 @@ -3688,6 +3703,7 @@ const ( XDP_SHARED_UMEM = 0x1 XDP_STATISTICS = 0x7 XDP_TXMD_FLAGS_CHECKSUM = 0x2 + XDP_TXMD_FLAGS_LAUNCH_TIME = 0x4 XDP_TXMD_FLAGS_TIMESTAMP = 0x1 XDP_TX_METADATA = 0x2 XDP_TX_RING = 0x3 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index a8c421e2..1c37f9fb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -360,6 +361,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 9a88d181..6f54d34a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -361,6 +362,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 7cb6a867..783ec5c1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ 
b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -366,6 +367,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index d0ecd2c5..ca83d3ba 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go index 7a2940ae..607e611c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -353,6 +354,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index d14ca8f2..b9cb5bd3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 2da1bac1..65b078a6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 28727514..5298a303 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 7f287b54..7bc557c8 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -359,6 +360,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x11 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x12 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go index 7e5f9e6a..152399bb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -414,6 +415,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 37c87952..1a1ce240 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 52201336..4231a1fb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -68,6 +68,7 @@ const ( CS8 = 0x300 CSIZE = 0x300 CSTOPB = 0x400 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x40 @@ -418,6 +419,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x14 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x15 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 4bfe2b5b..21c0e952 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -350,6 +351,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index e3cffb86..f00d1cd7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -68,6 +68,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0xfd12 ECCGETLAYOUT = 0x81484d11 ECCGETSTATS = 0x80104d12 ECHOCTL = 0x200 @@ -422,6 +423,7 @@ const ( SO_OOBINLINE = 0xa SO_PASSCRED = 0x10 SO_PASSPIDFD = 0x4c + SO_PASSRIGHTS = 0x53 SO_PASSSEC = 0x22 SO_PEEK_OFF = 0x2a SO_PEERCRED = 0x11 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go 
b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index c219c8db..bc8d539e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -71,6 +71,7 @@ const ( CS8 = 0x30 CSIZE = 0x30 CSTOPB = 0x40 + DM_MPATH_PROBE_PATHS = 0x2000fd12 ECCGETLAYOUT = 0x41484d11 ECCGETSTATS = 0x40104d12 ECHOCTL = 0x200 @@ -461,6 +462,7 @@ const ( SO_OOBINLINE = 0x100 SO_PASSCRED = 0x2 SO_PASSPIDFD = 0x55 + SO_PASSRIGHTS = 0x5c SO_PASSSEC = 0x1f SO_PEEK_OFF = 0x26 SO_PEERCRED = 0x40 diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index c79aaff3..aca56ee4 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -462,4 +462,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 5eb45069..2ea1ef58 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -385,4 +385,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 05e50297..d22c8af3 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -426,4 +426,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 38c53ec5..5ee264ae 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -329,4 +329,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go index 31d2e71a..f9f03ebf 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go @@ -325,4 +325,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index f4184a33..87c2118e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 05b99622..391ad102 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 43a256e9..56561577 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -376,4 +376,5 @@ const ( SYS_GETXATTRAT = 5464 SYS_LISTXATTRAT = 5465 SYS_REMOVEXATTRAT = 5466 + SYS_OPEN_TREE_ATTR = 5467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index eea5ddfc..0482b52e 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -446,4 +446,5 @@ const ( SYS_GETXATTRAT = 4464 SYS_LISTXATTRAT = 4465 SYS_REMOVEXATTRAT = 4466 + SYS_OPEN_TREE_ATTR = 4467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go index 0d777bfb..71806f08 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go @@ -453,4 +453,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index b4463650..e35a7105 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 0c7d21c1..2aea4767 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -425,4 +425,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index 84053916..6c9bb4e5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -330,4 +330,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index fcf1b790..680bc991 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -391,4 +391,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 52d15b5f..620f2710 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -404,4 +404,5 @@ const ( SYS_GETXATTRAT = 464 SYS_LISTXATTRAT = 465 SYS_REMOVEXATTRAT = 466 + SYS_OPEN_TREE_ATTR = 467 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 8bcac283..cd236443 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -115,7 +115,9 @@ type Statx_t struct { Atomic_write_unit_max uint32 Atomic_write_segments_max uint32 Dio_read_offset_align uint32 - _ [9]uint64 + Atomic_write_unit_max_opt uint32 + _ [1]uint32 + _ [8]uint64 } type Fsid struct { @@ -199,7 +201,8 @@ type FscryptAddKeyArg struct { Key_spec FscryptKeySpecifier Raw_size uint32 Key_id uint32 - _ [8]uint32 + Flags uint32 
+ _ [7]uint32 } type FscryptRemoveKeyArg struct { @@ -2317,6 +2320,11 @@ const ( NFT_CT_AVGPKT = 0x10 NFT_CT_ZONE = 0x11 NFT_CT_EVENTMASK = 0x12 + NFT_CT_SRC_IP = 0x13 + NFT_CT_DST_IP = 0x14 + NFT_CT_SRC_IP6 = 0x15 + NFT_CT_DST_IP6 = 0x16 + NFT_CT_ID = 0x17 NFTA_CT_UNSPEC = 0x0 NFTA_CT_DREG = 0x1 NFTA_CT_KEY = 0x2 @@ -2597,8 +2605,8 @@ const ( SOF_TIMESTAMPING_BIND_PHC = 0x8000 SOF_TIMESTAMPING_OPT_ID_TCP = 0x10000 - SOF_TIMESTAMPING_LAST = 0x20000 - SOF_TIMESTAMPING_MASK = 0x3ffff + SOF_TIMESTAMPING_LAST = 0x40000 + SOF_TIMESTAMPING_MASK = 0x7ffff SCM_TSTAMP_SND = 0x0 SCM_TSTAMP_SCHED = 0x1 @@ -4044,7 +4052,7 @@ const ( ETHTOOL_A_TSINFO_PHC_INDEX = 0x5 ETHTOOL_A_TSINFO_STATS = 0x6 ETHTOOL_A_TSINFO_HWTSTAMP_PROVIDER = 0x7 - ETHTOOL_A_TSINFO_MAX = 0x7 + ETHTOOL_A_TSINFO_MAX = 0x9 ETHTOOL_A_CABLE_TEST_UNSPEC = 0x0 ETHTOOL_A_CABLE_TEST_HEADER = 0x1 ETHTOOL_A_CABLE_TEST_MAX = 0x1 @@ -4130,6 +4138,19 @@ const ( ETHTOOL_A_TUNNEL_INFO_MAX = 0x2 ) +const ( + TCP_V4_FLOW = 0x1 + UDP_V4_FLOW = 0x2 + TCP_V6_FLOW = 0x5 + UDP_V6_FLOW = 0x6 + ESP_V4_FLOW = 0xa + ESP_V6_FLOW = 0xc + IP_USER_FLOW = 0xd + IPV6_USER_FLOW = 0xe + IPV6_FLOW = 0x11 + ETHER_FLOW = 0x12 +) + const SPEED_UNKNOWN = -0x1 type EthtoolDrvinfo struct { @@ -4780,7 +4801,7 @@ const ( NL80211_ATTR_MAC_HINT = 0xc8 NL80211_ATTR_MAC_MASK = 0xd7 NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca - NL80211_ATTR_MAX = 0x150 + NL80211_ATTR_MAX = 0x151 NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4 NL80211_ATTR_MAX_CSA_COUNTERS = 0xce NL80211_ATTR_MAX_HW_TIMESTAMP_PEERS = 0x143 @@ -5414,7 +5435,7 @@ const ( NL80211_FREQUENCY_ATTR_GO_CONCURRENT = 0xf NL80211_FREQUENCY_ATTR_INDOOR_ONLY = 0xe NL80211_FREQUENCY_ATTR_IR_CONCURRENT = 0xf - NL80211_FREQUENCY_ATTR_MAX = 0x21 + NL80211_FREQUENCY_ATTR_MAX = 0x22 NL80211_FREQUENCY_ATTR_MAX_TX_POWER = 0x6 NL80211_FREQUENCY_ATTR_NO_10MHZ = 0x11 NL80211_FREQUENCY_ATTR_NO_160MHZ = 0xc @@ -5530,7 +5551,7 @@ const ( NL80211_MAX_SUPP_SELECTORS = 0x80 NL80211_MBSSID_CONFIG_ATTR_EMA = 0x5 NL80211_MBSSID_CONFIG_ATTR_INDEX = 0x3 - NL80211_MBSSID_CONFIG_ATTR_MAX = 0x5 + NL80211_MBSSID_CONFIG_ATTR_MAX = 0x6 NL80211_MBSSID_CONFIG_ATTR_MAX_EMA_PROFILE_PERIODICITY = 0x2 NL80211_MBSSID_CONFIG_ATTR_MAX_INTERFACES = 0x1 NL80211_MBSSID_CONFIG_ATTR_TX_IFINDEX = 0x4 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 62db85f6..485f2d3a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -282,19 +282,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -330,17 +324,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -348,10 +336,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - 
Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 7d89d648..ecbd1ad8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -300,16 +300,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -344,27 +338,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 9c0b39ee..02f0463a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -91,7 +91,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -273,19 +273,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -321,17 +315,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -339,10 +327,22 
@@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index de9c7ff3..6f4d400d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -279,16 +279,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -323,27 +317,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go index 2336bd2b..cd532cfa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_loong64.go @@ -280,16 +280,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -324,27 +318,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max 
uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 4711f0be..41336208 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -278,19 +278,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,17 +320,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -344,10 +332,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index ab99a34b..eaa37eb7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -282,16 +282,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,27 +320,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min 
uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 04c9866e..98ae6a1e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -282,16 +282,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,27 +320,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 60aa69f6..cae19615 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -278,19 +278,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -326,17 +320,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -344,10 +332,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 
Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go index cb4fad78..6ce3b4e0 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc.go @@ -90,7 +90,7 @@ type Stat_t struct { Gid uint32 Rdev uint64 _ uint16 - _ [4]byte + _ [6]byte Size int64 Blksize int32 _ [4]byte @@ -285,19 +285,13 @@ type Taskstats struct { Ac_exitcode uint32 Ac_flag uint8 Ac_nice uint8 - _ [4]byte + _ [6]byte Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,17 +327,11 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 _ [4]byte Ac_tgetime uint64 @@ -351,10 +339,22 @@ type Taskstats struct { Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 60272cfc..c7429c6a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -289,16 +289,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,27 +327,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 
Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 3f5b91bc..4bf4baf4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -289,16 +289,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -333,27 +327,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 51550f15..e9709d70 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -307,16 +307,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]uint8 @@ -351,27 +345,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 
Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 3239e50e..fb44268c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -302,16 +302,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -346,27 +340,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index faf20027..9c38265c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -284,16 +284,10 @@ type Taskstats struct { Ac_nice uint8 Cpu_count uint64 Cpu_delay_total uint64 - Cpu_delay_max uint64 - Cpu_delay_min uint64 Blkio_count uint64 Blkio_delay_total uint64 - Blkio_delay_max uint64 - Blkio_delay_min uint64 Swapin_count uint64 Swapin_delay_total uint64 - Swapin_delay_max uint64 - Swapin_delay_min uint64 Cpu_run_real_total uint64 Cpu_run_virtual_total uint64 Ac_comm [32]int8 @@ -328,27 +322,33 @@ type Taskstats struct { Cpu_scaled_run_real_total uint64 Freepages_count uint64 Freepages_delay_total uint64 - Freepages_delay_max uint64 - Freepages_delay_min uint64 Thrashing_count uint64 Thrashing_delay_total uint64 - Thrashing_delay_max uint64 - Thrashing_delay_min uint64 Ac_btime64 uint64 Compact_count uint64 Compact_delay_total uint64 - Compact_delay_max uint64 - Compact_delay_min uint64 Ac_tgid uint32 Ac_tgetime uint64 Ac_exe_dev uint64 Ac_exe_inode uint64 Wpcopy_count uint64 Wpcopy_delay_total uint64 - Wpcopy_delay_max uint64 - Wpcopy_delay_min uint64 Irq_count uint64 Irq_delay_total uint64 + Cpu_delay_max uint64 + Cpu_delay_min uint64 + Blkio_delay_max uint64 + 
Blkio_delay_min uint64 + Swapin_delay_max uint64 + Swapin_delay_min uint64 + Freepages_delay_max uint64 + Freepages_delay_min uint64 + Thrashing_delay_max uint64 + Thrashing_delay_min uint64 + Compact_delay_max uint64 + Compact_delay_min uint64 + Wpcopy_delay_max uint64 + Wpcopy_delay_min uint64 Irq_delay_max uint64 Irq_delay_min uint64 } diff --git a/vendor/golang.org/x/term/term_windows.go b/vendor/golang.org/x/term/term_windows.go index df6bf948..0ddd81c0 100644 --- a/vendor/golang.org/x/term/term_windows.go +++ b/vendor/golang.org/x/term/term_windows.go @@ -20,12 +20,14 @@ func isTerminal(fd int) bool { return err == nil } +// This is intended to be used on a console input handle. +// See https://learn.microsoft.com/en-us/windows/console/setconsolemode func makeRaw(fd int) (*State, error) { var st uint32 if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { return nil, err } - raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT) raw |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { return nil, err diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go index 13e9a64a..bddb2e2a 100644 --- a/vendor/golang.org/x/term/terminal.go +++ b/vendor/golang.org/x/term/terminal.go @@ -146,6 +146,7 @@ const ( keyCtrlD = 4 keyCtrlU = 21 keyEnter = '\r' + keyLF = '\n' keyEscape = 27 keyBackspace = 127 keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota @@ -497,7 +498,7 @@ func (t *Terminal) historyAdd(entry string) { // handleKey processes the given key and, optionally, returns a line of text // that the user has entered. func (t *Terminal) handleKey(key rune) (line string, ok bool) { - if t.pasteActive && key != keyEnter { + if t.pasteActive && key != keyEnter && key != keyLF { t.addKeyToLine(key) return } @@ -567,7 +568,7 @@ func (t *Terminal) handleKey(key rune) (line string, ok bool) { t.setLine(runes, len(runes)) } } - case keyEnter: + case keyEnter, keyLF: t.moveCursorToPos(len(t.line)) t.queue([]rune("\r\n")) line = string(t.line) @@ -812,6 +813,10 @@ func (t *Terminal) readLine() (line string, err error) { if !t.pasteActive { lineIsPasted = false } + // If we have CR, consume LF if present (CRLF sequence) to avoid returning an extra empty line. + if key == keyEnter && len(rest) > 0 && rest[0] == keyLF { + rest = rest[1:] + } line, lineOk = t.handleKey(key) } if len(rest) > 0 { diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go index 89f5097b..0fb4e7ee 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go @@ -113,7 +113,7 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod // childrenOf elides the FuncType node beneath FuncDecl. // Add it back here for TypeParams, Params, Results, // all FieldLists). But we don't add it back for the "func" token - // even though it is is the tree at FuncDecl.Type.Func. + // even though it is the tree at FuncDecl.Type.Func. 
if decl, ok := node.(*ast.FuncDecl); ok { if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv { path = append(path, decl.Type) diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index bc44b2c8..a703cdfc 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -85,6 +85,7 @@ type event struct { // TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). // Type can be recovered from the sole bit in typ. +// [Tried this, wasn't faster. --adonovan] // Preorder visits all the nodes of the files supplied to New in // depth-first order. It calls f(n) for each node n before it visits diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go index be0f990a..9852331a 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -12,8 +12,6 @@ package inspector import ( "go/ast" "math" - - _ "unsafe" ) const ( diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index f1931d10..366aab6b 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -76,6 +76,8 @@ uninterpreted to Load, so that it can interpret them according to the conventions of the underlying build system. See the Example function for typical usage. +See also [golang.org/x/tools/go/packages/internal/linecount] +for an example application. # The driver protocol diff --git a/vendor/golang.org/x/tools/go/ssa/builder.go b/vendor/golang.org/x/tools/go/ssa/builder.go index fe713a77..a5ef8fb4 100644 --- a/vendor/golang.org/x/tools/go/ssa/builder.go +++ b/vendor/golang.org/x/tools/go/ssa/builder.go @@ -138,7 +138,7 @@ type builder struct { finished int // finished is the length of the prefix of fns containing built functions. // The task of building shared functions within the builder. - // Shared functions are ones the the builder may either create or lookup. + // Shared functions are ones the builder may either create or lookup. // These may be built by other builders in parallel. // The task is done when the builder has finished iterating, and it // waits for all shared functions to finish building. diff --git a/vendor/golang.org/x/tools/internal/imports/source_modindex.go b/vendor/golang.org/x/tools/internal/imports/source_modindex.go index 05229f06..ca745d4a 100644 --- a/vendor/golang.org/x/tools/internal/imports/source_modindex.go +++ b/vendor/golang.org/x/tools/internal/imports/source_modindex.go @@ -15,6 +15,10 @@ import ( // This code is here rather than in the modindex package // to avoid import loops +// TODO(adonovan): this code is only used by a test in this package. +// Can we delete it? Or is there a plan to call NewIndexSource from +// cmd/goimports? + // implements Source using modindex, so only for module cache. // // this is perhaps over-engineered. A new Index is read at first use. @@ -22,8 +26,8 @@ import ( // is read if the index changed. It is not clear the Mutex is needed. 
type IndexSource struct { modcachedir string - mutex sync.Mutex - ix *modindex.Index + mu sync.Mutex + index *modindex.Index // (access via getIndex) expires time.Time } @@ -39,13 +43,14 @@ func (s *IndexSource) LoadPackageNames(ctx context.Context, srcDir string, paths } func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, missing References) ([]*Result, error) { - if err := s.maybeReadIndex(); err != nil { + index, err := s.getIndex() + if err != nil { return nil, err } var cs []modindex.Candidate for pkg, nms := range missing { for nm := range nms { - x := s.ix.Lookup(pkg, nm, false) + x := index.Lookup(pkg, nm, false) cs = append(cs, x...) } } @@ -74,30 +79,22 @@ func (s *IndexSource) ResolveReferences(ctx context.Context, filename string, mi return ans, nil } -func (s *IndexSource) maybeReadIndex() error { - s.mutex.Lock() - defer s.mutex.Unlock() - - var readIndex bool - if time.Now().After(s.expires) { - ok, err := modindex.Update(s.modcachedir) - if err != nil { - return err - } - if ok { - readIndex = true - } - } +func (s *IndexSource) getIndex() (*modindex.Index, error) { + s.mu.Lock() + defer s.mu.Unlock() - if readIndex || s.ix == nil { - ix, err := modindex.ReadIndex(s.modcachedir) + // (s.index = nil => s.expires is zero, + // so the first condition is strictly redundant. + // But it makes the postcondition very clear.) + if s.index == nil || time.Now().After(s.expires) { + index, err := modindex.Update(s.modcachedir) if err != nil { - return err + return nil, err } - s.ix = ix - // for now refresh every 15 minutes - s.expires = time.Now().Add(time.Minute * 15) + s.index = index + s.expires = index.ValidAt.Add(15 * time.Minute) // (refresh period) } + // Inv: s.index != nil - return nil + return s.index, nil } diff --git a/vendor/golang.org/x/tools/internal/modindex/directories.go b/vendor/golang.org/x/tools/internal/modindex/directories.go index 2faa6ce0..9a963744 100644 --- a/vendor/golang.org/x/tools/internal/modindex/directories.go +++ b/vendor/golang.org/x/tools/internal/modindex/directories.go @@ -10,7 +10,6 @@ import ( "os" "path/filepath" "regexp" - "slices" "strings" "sync" "time" @@ -20,50 +19,48 @@ import ( ) type directory struct { - path Relpath + path string // relative to GOMODCACHE importPath string version string // semantic version - syms []symbol } -// byImportPath groups the directories by import path, -// sorting the ones with the same import path by semantic version, -// most recent first. -func byImportPath(dirs []Relpath) (map[string][]*directory, error) { - ans := make(map[string][]*directory) // key is import path - for _, d := range dirs { - ip, sv, err := DirToImportPathVersion(d) +// bestDirByImportPath returns the best directory for each import +// path, where "best" means most recent semantic version. These import +// paths are inferred from the GOMODCACHE-relative dir names in dirs. 
+func bestDirByImportPath(dirs []string) (map[string]directory, error) { + dirsByPath := make(map[string]directory) + for _, dir := range dirs { + importPath, version, err := dirToImportPathVersion(dir) if err != nil { return nil, err } - ans[ip] = append(ans[ip], &directory{ - path: d, - importPath: ip, - version: sv, - }) - } - for k, v := range ans { - semanticSort(v) - ans[k] = v + new := directory{ + path: dir, + importPath: importPath, + version: version, + } + if old, ok := dirsByPath[importPath]; !ok || compareDirectory(new, old) < 0 { + dirsByPath[importPath] = new + } } - return ans, nil + return dirsByPath, nil } -// sort the directories by semantic version, latest first -func semanticSort(v []*directory) { - slices.SortFunc(v, func(l, r *directory) int { - if n := semver.Compare(l.version, r.version); n != 0 { - return -n // latest first - } - return strings.Compare(string(l.path), string(r.path)) - }) +// compareDirectory defines an ordering of path@version directories, +// by descending version, then by ascending path. +func compareDirectory(x, y directory) int { + if sign := -semver.Compare(x.version, y.version); sign != 0 { + return sign // latest first + } + return strings.Compare(string(x.path), string(y.path)) } // modCacheRegexp splits a relpathpath into module, module version, and package. var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) -// DirToImportPathVersion computes import path and semantic version -func DirToImportPathVersion(dir Relpath) (string, string, error) { +// dirToImportPathVersion computes import path and semantic version +// from a GOMODCACHE-relative directory name. +func dirToImportPathVersion(dir string) (string, string, error) { m := modCacheRegexp.FindStringSubmatch(string(dir)) // m[1] is the module path // m[2] is the version major.minor.patch(-
 that contains the name
+// Package modindex contains code for building and searching an
+// [Index] of the Go module cache.
+package modindex
+
+// The directory containing the index, returned by
+// [IndexDir], contains a file index-name- that contains the name
 // of the current index. We believe writing that short file is atomic.
-// ReadIndex reads that file to get the file name of the index.
+// [Read] reads that file to get the file name of the index.
 // WriteIndex writes an index with a unique name and then
 // writes that name into a new version of index-name-.
 // ( stands for the CurrentVersion of the index format.)
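The paragraph above describes a two-step publication protocol: each index is written under a unique file name, and a short pointer file is then rewritten to name the current index, the short write being treated as atomic. A minimal sketch of that pattern, using hypothetical file names rather than this package's actual layout, could look like:

package main

import (
	"os"
	"path/filepath"
)

// publishIndex writes the index payload under a unique name, then rewrites a
// tiny pointer file naming it. The pointer write is short enough to be
// treated as atomic; a rename-based swap would be a stricter alternative.
func publishIndex(dir, uniqueName string, payload []byte) error {
	if err := os.WriteFile(filepath.Join(dir, uniqueName), payload, 0o666); err != nil {
		return err
	}
	// "index-name" is an illustrative pointer-file name, not the real one.
	return os.WriteFile(filepath.Join(dir, "index-name"), []byte(uniqueName), 0o666)
}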
-package modindex
 
 import (
+	"maps"
+	"os"
 	"path/filepath"
 	"slices"
 	"strings"
@@ -21,144 +25,95 @@ import (
 	"golang.org/x/mod/semver"
 )
 
-// Create always creates a new index for the go module cache that is in cachedir.
-func Create(cachedir string) error {
-	_, err := indexModCache(cachedir, true)
-	return err
-}
-
-// Update the index for the go module cache that is in cachedir,
-// If there is no existing index it will build one.
-// If there are changed directories since the last index, it will
-// write a new one and return true. Otherwise it returns false.
-func Update(cachedir string) (bool, error) {
-	return indexModCache(cachedir, false)
+// Update updates the index for the specified Go
+// module cache directory, creating it as needed.
+// On success it returns the current index.
+func Update(gomodcache string) (*Index, error) {
+	prev, err := Read(gomodcache)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+		prev = nil
+	}
+	return update(gomodcache, prev)
 }
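Update is the package's entry point: it returns a current *Index, rebuilding or refreshing it as needed, and the result can then be queried with Lookup, as the imports.IndexSource changes earlier in this patch do. A rough usage sketch, assuming a caller inside x/tools (the package is internal) and treating the Lookup arguments as illustrative:

package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"golang.org/x/tools/internal/modindex"
)

func main() {
	// GOMODCACHE commonly defaults to $GOPATH/pkg/mod.
	gomodcache := os.Getenv("GOMODCACHE")
	if gomodcache == "" {
		gomodcache = filepath.Join(os.Getenv("GOPATH"), "pkg", "mod")
	}

	// Update creates the index on first use and refreshes it afterwards.
	index, err := modindex.Update(gomodcache)
	if err != nil {
		log.Fatal(err)
	}

	// Lookup mirrors the call made in imports/source_modindex.go earlier in this patch.
	for _, c := range index.Lookup("errgroup", "Group", false) {
		fmt.Printf("%+v\n", c)
	}
}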
 
-// indexModCache writes an index current as of when it is called.
-// If clear is true the index is constructed from all of GOMODCACHE
-// otherwise the index is constructed from the last previous index
-// and the updates to the cache. It returns true if it wrote an index,
-// false otherwise.
-func indexModCache(cachedir string, clear bool) (bool, error) {
-	cachedir, err := filepath.Abs(cachedir)
+// update builds, writes, and returns the current index.
+//
+// If old is nil, the new index is built from all of GOMODCACHE;
+// otherwise it is built from the old index plus cache updates
+// since the previous index's time.
+func update(gomodcache string, old *Index) (*Index, error) {
+	gomodcache, err := filepath.Abs(gomodcache)
 	if err != nil {
-		return false, err
+		return nil, err
 	}
-	cd := Abspath(cachedir)
-	future := time.Now().Add(24 * time.Hour) // safely in the future
-	ok, err := modindexTimed(future, cd, clear)
+	new, changed, err := build(gomodcache, old)
 	if err != nil {
-		return false, err
+		return nil, err
 	}
-	return ok, nil
-}
-
-// modindexTimed writes an index current as of onlyBefore.
-// If clear is true the index is constructed from all of GOMODCACHE
-// otherwise the index is constructed from the last previous index
-// and all the updates to the cache before onlyBefore.
-// It returns true if it wrote a new index, false if it wrote nothing.
-func modindexTimed(onlyBefore time.Time, cachedir Abspath, clear bool) (bool, error) {
-	var curIndex *Index
-	if !clear {
-		var err error
-		curIndex, err = ReadIndex(string(cachedir))
-		if clear && err != nil {
-			return false, err
+	if old == nil || changed {
+		if err := write(gomodcache, new); err != nil {
+			return nil, err
 		}
-		// TODO(pjw): check that most of those directories still exist
-	}
-	cfg := &work{
-		onlyBefore: onlyBefore,
-		oldIndex:   curIndex,
-		cacheDir:   cachedir,
-	}
-	if curIndex != nil {
-		cfg.onlyAfter = curIndex.Changed
-	}
-	if err := cfg.buildIndex(); err != nil {
-		return false, err
 	}
-	if len(cfg.newIndex.Entries) == 0 && curIndex != nil {
-		// no changes from existing curIndex, don't write a new index
-		return false, nil
-	}
-	if err := cfg.writeIndex(); err != nil {
-		return false, err
-	}
-	return true, nil
-}
-
-type work struct {
-	onlyBefore time.Time // do not use directories later than this
-	onlyAfter  time.Time // only interested in directories after this
-	// directories from before onlyAfter come from oldIndex
-	oldIndex *Index
-	newIndex *Index
-	cacheDir Abspath
+	return new, nil
 }
 
-func (w *work) buildIndex() error {
-	// The effective date of the new index should be at least
-	// slightly earlier than when the directories are scanned
-	// so set it now.
-	w.newIndex = &Index{Changed: time.Now(), Cachedir: w.cacheDir}
-	dirs := findDirs(string(w.cacheDir), w.onlyAfter, w.onlyBefore)
-	if len(dirs) == 0 {
-		return nil
+// build returns a new index for the specified Go module cache (an
+// absolute path).
+//
+// If an old index is provided, only directories more recent than it
+// are scanned; older directories are provided by the old
+// Index.
+//
+// The boolean result indicates whether new entries were found.
+func build(gomodcache string, old *Index) (*Index, bool, error) {
+	// Set the time window.
+	var start time.Time // = dawn of time
+	if old != nil {
+		start = old.ValidAt
 	}
-	newdirs, err := byImportPath(dirs)
+	now := time.Now()
+	end := now.Add(24 * time.Hour) // safely in the future
+
+	// Enumerate GOMODCACHE package directories.
+	// Choose the best (latest) package for each import path.
+	pkgDirs := findDirs(gomodcache, start, end)
+	dirByPath, err := bestDirByImportPath(pkgDirs)
 	if err != nil {
-		return err
+		return nil, false, err
 	}
-	// for each import path it might occur only in newdirs,
-	// only in w.oldIndex, or in both.
-	// If it occurs in both, use the semantically later one
-	if w.oldIndex != nil {
-		for _, e := range w.oldIndex.Entries {
-			found, ok := newdirs[e.ImportPath]
-			if !ok {
-				w.newIndex.Entries = append(w.newIndex.Entries, e)
-				continue // use this one, there is no new one
-			}
-			if semver.Compare(found[0].version, e.Version) > 0 {
-				// use the new one
-			} else {
-				// use the old one, forget the new one
-				w.newIndex.Entries = append(w.newIndex.Entries, e)
-				delete(newdirs, e.ImportPath)
+
+	// For each import path it might occur only in
+	// dirByPath, only in old, or in both.
+	// If both, use the semantically later one.
+	var entries []Entry
+	if old != nil {
+		for _, entry := range old.Entries {
+			dir, ok := dirByPath[entry.ImportPath]
+			if !ok || semver.Compare(dir.version, entry.Version) <= 0 {
+				// New dir is missing or not more recent; use old entry.
+				entries = append(entries, entry)
+				delete(dirByPath, entry.ImportPath)
 			}
 		}
 	}
-	// get symbol information for all the new diredtories
-	getSymbols(w.cacheDir, newdirs)
-	// assemble the new index entries
-	for k, v := range newdirs {
-		d := v[0]
-		pkg, names := processSyms(d.syms)
-		if pkg == "" {
-			continue // PJW: does this ever happen?
-		}
-		entry := Entry{
-			PkgName:    pkg,
-			Dir:        d.path,
-			ImportPath: k,
-			Version:    d.version,
-			Names:      names,
-		}
-		w.newIndex.Entries = append(w.newIndex.Entries, entry)
-	}
-	// sort the entries in the new index
-	slices.SortFunc(w.newIndex.Entries, func(l, r Entry) int {
-		if n := strings.Compare(l.PkgName, r.PkgName); n != 0 {
+
+	// Extract symbol information for all the new directories.
+	newEntries := extractSymbols(gomodcache, maps.Values(dirByPath))
+	entries = append(entries, newEntries...)
+	slices.SortFunc(entries, func(x, y Entry) int {
+		if n := strings.Compare(x.PkgName, y.PkgName); n != 0 {
 			return n
 		}
-		return strings.Compare(l.ImportPath, r.ImportPath)
+		return strings.Compare(x.ImportPath, y.ImportPath)
 	})
-	return nil
-}
 
-func (w *work) writeIndex() error {
-	return writeIndex(w.cacheDir, w.newIndex)
+	return &Index{
+		GOMODCACHE: gomodcache,
+		ValidAt:    now, // time before the directories were scanned
+		Entries:    entries,
+	}, len(newEntries) > 0, nil
 }
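The heart of build is the merge rule: an entry from the old index survives unless the new scan found a strictly newer version of its import path, and whatever remains in dirByPath becomes a new entry. The standalone sketch below restates that rule with simplified stand-ins for Entry and directory, using the same semver comparison:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

// Simplified stand-ins for the package's Entry and directory types.
type entry struct{ ImportPath, Version string }
type dir struct{ importPath, version string }

// merge keeps an old entry unless the new scan found a strictly newer
// version of the same import path; directories that survive become new entries.
func merge(old []entry, dirByPath map[string]dir) []entry {
	var out []entry
	for _, e := range old {
		d, ok := dirByPath[e.ImportPath]
		if !ok || semver.Compare(d.version, e.Version) <= 0 {
			out = append(out, e) // no newer directory: keep the old entry
			delete(dirByPath, e.ImportPath)
		}
	}
	for _, d := range dirByPath {
		out = append(out, entry{ImportPath: d.importPath, Version: d.version})
	}
	return out
}

func main() {
	old := []entry{{"example.com/a", "v1.2.0"}, {"example.com/b", "v0.3.0"}}
	dirs := map[string]dir{
		"example.com/a": {"example.com/a", "v1.1.0"}, // older: old entry wins
		"example.com/b": {"example.com/b", "v0.4.0"}, // newer: replaces old entry
	}
	fmt.Println(merge(old, dirs))
}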
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
index 31a502c5..fe24db9b 100644
--- a/vendor/golang.org/x/tools/internal/modindex/symbols.go
+++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -10,11 +10,13 @@ import (
 	"go/parser"
 	"go/token"
 	"go/types"
+	"iter"
 	"os"
 	"path/filepath"
 	"runtime"
 	"slices"
 	"strings"
+	"sync"
 
 	"golang.org/x/sync/errgroup"
 )
@@ -34,41 +36,65 @@ type symbol struct {
 	sig  string // signature information, for F
 }
 
-// find the symbols for the best directories
-func getSymbols(cd Abspath, dirs map[string][]*directory) {
+// extractSymbols returns a (new, unordered) array of Entries, one for
+// each provided package directory, describing its exported symbols.
+func extractSymbols(cwd string, dirs iter.Seq[directory]) []Entry {
+	var (
+		mu      sync.Mutex
+		entries []Entry
+	)
+
 	var g errgroup.Group
 	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
-	for _, vv := range dirs {
-		// throttling some day?
-		d := vv[0]
+	for dir := range dirs {
 		g.Go(func() error {
-			thedir := filepath.Join(string(cd), string(d.path))
+			thedir := filepath.Join(cwd, string(dir.path))
 			mode := parser.SkipObjectResolution | parser.ParseComments
 
-			fi, err := os.ReadDir(thedir)
+			// Parse all Go files in dir and extract symbols.
+			dirents, err := os.ReadDir(thedir)
 			if err != nil {
 				return nil // log this someday?
 			}
-			for _, fx := range fi {
-				if !strings.HasSuffix(fx.Name(), ".go") || strings.HasSuffix(fx.Name(), "_test.go") {
+			var syms []symbol
+			for _, dirent := range dirents {
+				if !strings.HasSuffix(dirent.Name(), ".go") ||
+					strings.HasSuffix(dirent.Name(), "_test.go") {
 					continue
 				}
-				fname := filepath.Join(thedir, fx.Name())
+				fname := filepath.Join(thedir, dirent.Name())
 				tr, err := parser.ParseFile(token.NewFileSet(), fname, nil, mode)
 				if err != nil {
 					continue // ignore errors, someday log them?
 				}
-				d.syms = append(d.syms, getFileExports(tr)...)
+				syms = append(syms, getFileExports(tr)...)
+			}
+
+			// Create an entry for the package.
+			pkg, names := processSyms(syms)
+			if pkg != "" {
+				mu.Lock()
+				defer mu.Unlock()
+				entries = append(entries, Entry{
+					PkgName:    pkg,
+					Dir:        dir.path,
+					ImportPath: dir.importPath,
+					Version:    dir.version,
+					Names:      names,
+				})
 			}
+
 			return nil
 		})
 	}
-	g.Wait()
+	g.Wait() // ignore error
+
+	return entries
 }
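extractSymbols is a textbook bounded fan-out: an errgroup capped at roughly half the CPUs does the per-directory parsing, and a mutex serializes appends to the shared results slice. The same skeleton, with placeholder work standing in for the Go parsing, looks like this:

package main

import (
	"fmt"
	"runtime"
	"strings"
	"sync"

	"golang.org/x/sync/errgroup"
)

func main() {
	dirs := []string{"a", "b", "c", "d"}

	var (
		mu      sync.Mutex
		results []string
	)

	var g errgroup.Group
	// Same throttle as extractSymbols: at least two workers, at most half the CPUs.
	g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
	for _, dir := range dirs {
		g.Go(func() error {
			r := strings.ToUpper(dir) // placeholder for per-directory parsing
			mu.Lock()
			defer mu.Unlock()
			results = append(results, r)
			return nil
		})
	}
	g.Wait() // errors are ignored here, as in extractSymbols

	fmt.Println(results)
}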
 
 func getFileExports(f *ast.File) []symbol {
 	pkg := f.Name.Name
-	if pkg == "main" {
+	if pkg == "main" || pkg == "" {
 		return nil
 	}
 	var ans []symbol
@@ -202,17 +228,18 @@ func processSyms(syms []symbol) (string, []string) {
 	pkg := syms[0].pkg
 	var names []string
 	for _, s := range syms {
+		if s.pkg != pkg {
+			// Symbols came from two files in same dir
+			// with different package declarations.
+			continue
+		}
 		var nx string
-		if s.pkg == pkg {
-			if s.sig != "" {
-				nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
-			} else {
-				nx = fmt.Sprintf("%s %s", s.name, s.kind)
-			}
-			names = append(names, nx)
+		if s.sig != "" {
+			nx = fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig)
 		} else {
-			continue // PJW: do we want to keep track of these?
+			nx = fmt.Sprintf("%s %s", s.name, s.kind)
 		}
+		names = append(names, nx)
 	}
 	return pkg, names
 }
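After this refactoring, processSyms still emits one "name kind" string per exported symbol, appends the signature when one is present, and silently drops symbols whose file declared a different package. A small restatement of that formatting rule on made-up input (the kind letters and signature text are illustrative, not the real encoding):

package main

import "fmt"

// sym is a stand-in for the package's symbol type.
type sym struct{ pkg, name, kind, sig string }

// names applies the same rule as processSyms: skip symbols from a mismatched
// package clause, and append the signature only when one is present.
func names(pkg string, syms []sym) []string {
	var out []string
	for _, s := range syms {
		if s.pkg != pkg {
			continue
		}
		if s.sig != "" {
			out = append(out, fmt.Sprintf("%s %s %s", s.name, s.kind, s.sig))
		} else {
			out = append(out, fmt.Sprintf("%s %s", s.name, s.kind))
		}
	}
	return out
}

func main() {
	syms := []sym{
		{pkg: "yaml", name: "Marshal", kind: "F", sig: "1"}, // illustrative signature
		{pkg: "yaml", name: "Decoder", kind: "T"},
		{pkg: "yaml_x", name: "helper", kind: "F"}, // different package clause: dropped
	}
	fmt.Println(names("yaml", syms))
}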
diff --git a/vendor/golang.org/x/tools/internal/modindex/types.go b/vendor/golang.org/x/tools/internal/modindex/types.go
deleted file mode 100644
index ece44886..00000000
--- a/vendor/golang.org/x/tools/internal/modindex/types.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package modindex
-
-import (
-	"strings"
-)
-
-// some special types to avoid confusions
-
-// distinguish various types of directory names. It's easy to get confused.
-type Abspath string // absolute paths
-type Relpath string // paths with GOMODCACHE prefix removed
-
-func toRelpath(cachedir Abspath, s string) Relpath {
-	if strings.HasPrefix(s, string(cachedir)) {
-		if s == string(cachedir) {
-			return Relpath("")
-		}
-		return Relpath(s[len(cachedir)+1:])
-	}
-	return Relpath(s)
-}
diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
index e942bc98..743bfb81 100644
--- a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
+++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go
@@ -371,7 +371,31 @@ func ConsumeVarint(b []byte) (v uint64, n int) {
 func SizeVarint(v uint64) int {
 	// This computes 1 + (bits.Len64(v)-1)/7.
 	// 9/64 is a good enough approximation of 1/7
-	return int(9*uint32(bits.Len64(v))+64) / 64
+	//
+	// The Go compiler can translate the bits.LeadingZeros64 call into the LZCNT
+	// instruction, which is very fast on CPUs from the last few years. The
+	// specific way of expressing the calculation matches C++ Protobuf, see
+	// https://godbolt.org/z/4P3h53oM4 for the C++ code and how gcc/clang
+	// optimize that function for GOAMD64=v1 and GOAMD64=v3 (-march=haswell).
+
+	// By OR'ing v with 1, we guarantee that v is never 0, without changing the
+	// result of SizeVarint. LZCNT is not defined for 0, meaning the compiler
+	// needs to add extra instructions to handle that case.
+	//
+	// The Go compiler currently (go1.24.4) does not make use of this knowledge.
+	// This opportunity (removing the XOR instruction, which handles the 0 case)
+	// results in a small (1%) performance win across CPU architectures.
+	//
+	// Independently of avoiding the 0 case, we need the v |= 1 line because
+	// it allows the Go compiler to eliminate an extra XCHGL barrier.
+	v |= 1
+
+	// It would be clearer to write log2value := 63 - uint32(...), but
+	// writing uint32(...) ^ 63 is much more efficient (-14% ARM, -20% Intel).
+	// Proof of identity for our value range [0..63]:
+	// https://go.dev/play/p/Pdn9hEWYakX
+	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
+	return int((log2value*9 + (64 + 9)) / 64)
 }
 
 // AppendFixed32 appends v to b as a little-endian uint32.
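The long comment in SizeVarint above boils down to one identity: (9*log2 + 73)/64, with log2 = 63 ^ LeadingZeros64(v|1), equals the naive 1 + (bits.Len64(v)-1)/7 for every non-zero v, and the |1 makes v == 0 come out as one byte as well. A quick sanity check of that identity at the 7-bit boundaries:

package main

import (
	"fmt"
	"math"
	"math/bits"
)

// naive is the straightforward definition: one byte per 7 bits of payload.
func naive(v uint64) int {
	if v == 0 {
		return 1
	}
	return 1 + (bits.Len64(v)-1)/7
}

// fast mirrors the expression used by SizeVarint above.
func fast(v uint64) int {
	v |= 1
	log2value := uint32(bits.LeadingZeros64(v)) ^ 63
	return int((log2value*9 + (64 + 9)) / 64)
}

func main() {
	cases := []uint64{0, 1, 127, 128, 16383, 16384, math.MaxUint64}
	for _, v := range cases {
		fmt.Printf("v=%d naive=%d fast=%d\n", v, naive(v), fast(v))
	}
}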
diff --git a/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb b/vendor/google.golang.org/protobuf/internal/editiondefaults/editions_defaults.binpb
index 323829da1477e4496d664b2a1092a9f9cec275d4..04696351eeeef14cbbd69fd1f4250530b1fbfd56 100644
GIT binary patch
literal 154
zcmX}mI}(5(3Eat$;}$;v

literal 146
zcmX}mF%Ezr3X5(&e%rBRTLK{CjOa+)E@2mYkk=mEF7
B6)FG#

diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
index b08b7183..a0aad277 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/editions.go
@@ -72,6 +72,9 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
 			case genid.FeatureSet_EnforceNamingStyle_field_number:
 				// EnforceNamingStyle is enforced in protoc, languages other than C++
 				// are not supposed to do anything with this feature.
+			case genid.FeatureSet_DefaultSymbolVisibility_field_number:
+				// DefaultSymbolVisibility is enforced in protoc, runtimes should not
+				// inspect this value.
 			default:
 				panic(fmt.Sprintf("unkown field number %d while unmarshalling FeatureSet", num))
 			}
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/presence.go b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
new file mode 100644
index 00000000..a12ec979
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/presence.go
@@ -0,0 +1,33 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package filedesc
+
+import "google.golang.org/protobuf/reflect/protoreflect"
+
+// UsePresenceForField reports whether the presence bitmap should be used for
+// the specified field.
+func UsePresenceForField(fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
+	switch {
+	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
+		// Oneof fields never use the presence bitmap.
+		//
+		// Synthetic oneofs are an exception: Those are used to implement proto3
+		// optional fields and hence should follow non-oneof field semantics.
+		return false, false
+
+	case fd.IsMap():
+		// Map-typed fields never use the presence bitmap.
+		return false, false
+
+	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
+		// Lazy fields always use the presence bitmap (only messages can be lazy).
+		isLazy := fd.(interface{ IsLazy() bool }).IsLazy()
+		return isLazy, isLazy
+
+	default:
+		// If the field has presence, use the presence bitmap.
+		return fd.HasPresence(), false
+	}
+}
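UsePresenceForField lives in an internal package, but its decision table is expressed in terms of the public protoreflect API, so the same classification can be sketched externally. The version below folds in every branch except the lazy-message check, which needs the internal IsLazy accessor and is conservatively reported as false here; structpb.Struct is used only as a convenient well-known message to walk.

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/structpb"
)

// usesPresence mirrors UsePresenceForField's switch, minus laziness.
func usesPresence(fd protoreflect.FieldDescriptor) bool {
	switch {
	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
		return false // members of a real oneof never use the bitmap
	case fd.IsMap():
		return false // map fields never use the bitmap
	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
		return false // only lazy messages do, and laziness is internal-only
	default:
		return fd.HasPresence()
	}
}

func main() {
	md := (&structpb.Struct{}).ProtoReflect().Descriptor()
	fields := md.Fields()
	for i := 0; i < fields.Len(); i++ {
		fd := fields.Get(i)
		fmt.Printf("%s: presence=%v\n", fd.Name(), usesPresence(fd))
	}
}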
diff --git a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
index 39524782..950a6a32 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/descriptor_gen.go
@@ -34,6 +34,19 @@ const (
 	Edition_EDITION_MAX_enum_value             = 2147483647
 )
 
+// Full and short names for google.protobuf.SymbolVisibility.
+const (
+	SymbolVisibility_enum_fullname = "google.protobuf.SymbolVisibility"
+	SymbolVisibility_enum_name     = "SymbolVisibility"
+)
+
+// Enum values for google.protobuf.SymbolVisibility.
+const (
+	SymbolVisibility_VISIBILITY_UNSET_enum_value  = 0
+	SymbolVisibility_VISIBILITY_LOCAL_enum_value  = 1
+	SymbolVisibility_VISIBILITY_EXPORT_enum_value = 2
+)
+
 // Names for google.protobuf.FileDescriptorSet.
 const (
 	FileDescriptorSet_message_name     protoreflect.Name     = "FileDescriptorSet"
@@ -65,6 +78,7 @@ const (
 	FileDescriptorProto_Dependency_field_name       protoreflect.Name = "dependency"
 	FileDescriptorProto_PublicDependency_field_name protoreflect.Name = "public_dependency"
 	FileDescriptorProto_WeakDependency_field_name   protoreflect.Name = "weak_dependency"
+	FileDescriptorProto_OptionDependency_field_name protoreflect.Name = "option_dependency"
 	FileDescriptorProto_MessageType_field_name      protoreflect.Name = "message_type"
 	FileDescriptorProto_EnumType_field_name         protoreflect.Name = "enum_type"
 	FileDescriptorProto_Service_field_name          protoreflect.Name = "service"
@@ -79,6 +93,7 @@ const (
 	FileDescriptorProto_Dependency_field_fullname       protoreflect.FullName = "google.protobuf.FileDescriptorProto.dependency"
 	FileDescriptorProto_PublicDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.public_dependency"
 	FileDescriptorProto_WeakDependency_field_fullname   protoreflect.FullName = "google.protobuf.FileDescriptorProto.weak_dependency"
+	FileDescriptorProto_OptionDependency_field_fullname protoreflect.FullName = "google.protobuf.FileDescriptorProto.option_dependency"
 	FileDescriptorProto_MessageType_field_fullname      protoreflect.FullName = "google.protobuf.FileDescriptorProto.message_type"
 	FileDescriptorProto_EnumType_field_fullname         protoreflect.FullName = "google.protobuf.FileDescriptorProto.enum_type"
 	FileDescriptorProto_Service_field_fullname          protoreflect.FullName = "google.protobuf.FileDescriptorProto.service"
@@ -96,6 +111,7 @@ const (
 	FileDescriptorProto_Dependency_field_number       protoreflect.FieldNumber = 3
 	FileDescriptorProto_PublicDependency_field_number protoreflect.FieldNumber = 10
 	FileDescriptorProto_WeakDependency_field_number   protoreflect.FieldNumber = 11
+	FileDescriptorProto_OptionDependency_field_number protoreflect.FieldNumber = 15
 	FileDescriptorProto_MessageType_field_number      protoreflect.FieldNumber = 4
 	FileDescriptorProto_EnumType_field_number         protoreflect.FieldNumber = 5
 	FileDescriptorProto_Service_field_number          protoreflect.FieldNumber = 6
@@ -124,6 +140,7 @@ const (
 	DescriptorProto_Options_field_name        protoreflect.Name = "options"
 	DescriptorProto_ReservedRange_field_name  protoreflect.Name = "reserved_range"
 	DescriptorProto_ReservedName_field_name   protoreflect.Name = "reserved_name"
+	DescriptorProto_Visibility_field_name     protoreflect.Name = "visibility"
 
 	DescriptorProto_Name_field_fullname           protoreflect.FullName = "google.protobuf.DescriptorProto.name"
 	DescriptorProto_Field_field_fullname          protoreflect.FullName = "google.protobuf.DescriptorProto.field"
@@ -135,6 +152,7 @@ const (
 	DescriptorProto_Options_field_fullname        protoreflect.FullName = "google.protobuf.DescriptorProto.options"
 	DescriptorProto_ReservedRange_field_fullname  protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_range"
 	DescriptorProto_ReservedName_field_fullname   protoreflect.FullName = "google.protobuf.DescriptorProto.reserved_name"
+	DescriptorProto_Visibility_field_fullname     protoreflect.FullName = "google.protobuf.DescriptorProto.visibility"
 )
 
 // Field numbers for google.protobuf.DescriptorProto.
@@ -149,6 +167,7 @@ const (
 	DescriptorProto_Options_field_number        protoreflect.FieldNumber = 7
 	DescriptorProto_ReservedRange_field_number  protoreflect.FieldNumber = 9
 	DescriptorProto_ReservedName_field_number   protoreflect.FieldNumber = 10
+	DescriptorProto_Visibility_field_number     protoreflect.FieldNumber = 11
 )
 
 // Names for google.protobuf.DescriptorProto.ExtensionRange.
@@ -388,12 +407,14 @@ const (
 	EnumDescriptorProto_Options_field_name       protoreflect.Name = "options"
 	EnumDescriptorProto_ReservedRange_field_name protoreflect.Name = "reserved_range"
 	EnumDescriptorProto_ReservedName_field_name  protoreflect.Name = "reserved_name"
+	EnumDescriptorProto_Visibility_field_name    protoreflect.Name = "visibility"
 
 	EnumDescriptorProto_Name_field_fullname          protoreflect.FullName = "google.protobuf.EnumDescriptorProto.name"
 	EnumDescriptorProto_Value_field_fullname         protoreflect.FullName = "google.protobuf.EnumDescriptorProto.value"
 	EnumDescriptorProto_Options_field_fullname       protoreflect.FullName = "google.protobuf.EnumDescriptorProto.options"
 	EnumDescriptorProto_ReservedRange_field_fullname protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_range"
 	EnumDescriptorProto_ReservedName_field_fullname  protoreflect.FullName = "google.protobuf.EnumDescriptorProto.reserved_name"
+	EnumDescriptorProto_Visibility_field_fullname    protoreflect.FullName = "google.protobuf.EnumDescriptorProto.visibility"
 )
 
 // Field numbers for google.protobuf.EnumDescriptorProto.
@@ -403,6 +424,7 @@ const (
 	EnumDescriptorProto_Options_field_number       protoreflect.FieldNumber = 3
 	EnumDescriptorProto_ReservedRange_field_number protoreflect.FieldNumber = 4
 	EnumDescriptorProto_ReservedName_field_number  protoreflect.FieldNumber = 5
+	EnumDescriptorProto_Visibility_field_number    protoreflect.FieldNumber = 6
 )
 
 // Names for google.protobuf.EnumDescriptorProto.EnumReservedRange.
@@ -1008,32 +1030,35 @@ const (
 
 // Field names for google.protobuf.FeatureSet.
 const (
-	FeatureSet_FieldPresence_field_name         protoreflect.Name = "field_presence"
-	FeatureSet_EnumType_field_name              protoreflect.Name = "enum_type"
-	FeatureSet_RepeatedFieldEncoding_field_name protoreflect.Name = "repeated_field_encoding"
-	FeatureSet_Utf8Validation_field_name        protoreflect.Name = "utf8_validation"
-	FeatureSet_MessageEncoding_field_name       protoreflect.Name = "message_encoding"
-	FeatureSet_JsonFormat_field_name            protoreflect.Name = "json_format"
-	FeatureSet_EnforceNamingStyle_field_name    protoreflect.Name = "enforce_naming_style"
-
-	FeatureSet_FieldPresence_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
-	FeatureSet_EnumType_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
-	FeatureSet_RepeatedFieldEncoding_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
-	FeatureSet_Utf8Validation_field_fullname        protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
-	FeatureSet_MessageEncoding_field_fullname       protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
-	FeatureSet_JsonFormat_field_fullname            protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
-	FeatureSet_EnforceNamingStyle_field_fullname    protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
+	FeatureSet_FieldPresence_field_name           protoreflect.Name = "field_presence"
+	FeatureSet_EnumType_field_name                protoreflect.Name = "enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_name   protoreflect.Name = "repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_name          protoreflect.Name = "utf8_validation"
+	FeatureSet_MessageEncoding_field_name         protoreflect.Name = "message_encoding"
+	FeatureSet_JsonFormat_field_name              protoreflect.Name = "json_format"
+	FeatureSet_EnforceNamingStyle_field_name      protoreflect.Name = "enforce_naming_style"
+	FeatureSet_DefaultSymbolVisibility_field_name protoreflect.Name = "default_symbol_visibility"
+
+	FeatureSet_FieldPresence_field_fullname           protoreflect.FullName = "google.protobuf.FeatureSet.field_presence"
+	FeatureSet_EnumType_field_fullname                protoreflect.FullName = "google.protobuf.FeatureSet.enum_type"
+	FeatureSet_RepeatedFieldEncoding_field_fullname   protoreflect.FullName = "google.protobuf.FeatureSet.repeated_field_encoding"
+	FeatureSet_Utf8Validation_field_fullname          protoreflect.FullName = "google.protobuf.FeatureSet.utf8_validation"
+	FeatureSet_MessageEncoding_field_fullname         protoreflect.FullName = "google.protobuf.FeatureSet.message_encoding"
+	FeatureSet_JsonFormat_field_fullname              protoreflect.FullName = "google.protobuf.FeatureSet.json_format"
+	FeatureSet_EnforceNamingStyle_field_fullname      protoreflect.FullName = "google.protobuf.FeatureSet.enforce_naming_style"
+	FeatureSet_DefaultSymbolVisibility_field_fullname protoreflect.FullName = "google.protobuf.FeatureSet.default_symbol_visibility"
 )
 
 // Field numbers for google.protobuf.FeatureSet.
 const (
-	FeatureSet_FieldPresence_field_number         protoreflect.FieldNumber = 1
-	FeatureSet_EnumType_field_number              protoreflect.FieldNumber = 2
-	FeatureSet_RepeatedFieldEncoding_field_number protoreflect.FieldNumber = 3
-	FeatureSet_Utf8Validation_field_number        protoreflect.FieldNumber = 4
-	FeatureSet_MessageEncoding_field_number       protoreflect.FieldNumber = 5
-	FeatureSet_JsonFormat_field_number            protoreflect.FieldNumber = 6
-	FeatureSet_EnforceNamingStyle_field_number    protoreflect.FieldNumber = 7
+	FeatureSet_FieldPresence_field_number           protoreflect.FieldNumber = 1
+	FeatureSet_EnumType_field_number                protoreflect.FieldNumber = 2
+	FeatureSet_RepeatedFieldEncoding_field_number   protoreflect.FieldNumber = 3
+	FeatureSet_Utf8Validation_field_number          protoreflect.FieldNumber = 4
+	FeatureSet_MessageEncoding_field_number         protoreflect.FieldNumber = 5
+	FeatureSet_JsonFormat_field_number              protoreflect.FieldNumber = 6
+	FeatureSet_EnforceNamingStyle_field_number      protoreflect.FieldNumber = 7
+	FeatureSet_DefaultSymbolVisibility_field_number protoreflect.FieldNumber = 8
 )
 
 // Full and short names for google.protobuf.FeatureSet.FieldPresence.
@@ -1128,6 +1153,27 @@ const (
 	FeatureSet_STYLE_LEGACY_enum_value                 = 2
 )
 
+// Names for google.protobuf.FeatureSet.VisibilityFeature.
+const (
+	FeatureSet_VisibilityFeature_message_name     protoreflect.Name     = "VisibilityFeature"
+	FeatureSet_VisibilityFeature_message_fullname protoreflect.FullName = "google.protobuf.FeatureSet.VisibilityFeature"
+)
+
+// Full and short names for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_fullname = "google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility"
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_enum_name     = "DefaultSymbolVisibility"
+)
+
+// Enum values for google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility.
+const (
+	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN_enum_value = 0
+	FeatureSet_VisibilityFeature_EXPORT_ALL_enum_value                        = 1
+	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL_enum_value                  = 2
+	FeatureSet_VisibilityFeature_LOCAL_ALL_enum_value                         = 3
+	FeatureSet_VisibilityFeature_STRICT_enum_value                            = 4
+)
+
 // Names for google.protobuf.FeatureSetDefaults.
 const (
 	FeatureSetDefaults_message_name     protoreflect.Name     = "FeatureSetDefaults"
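
The genid constants above are the generated mirrors of the new descriptor.proto fields (DescriptorProto.visibility = 11, EnumDescriptorProto.visibility = 6, FeatureSet.default_symbol_visibility = 8). A minimal sketch of how those numbers line up with the reflective descriptors, assuming the vendored google.golang.org/protobuf matches the v1.36.7 bump further down in this patch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Field 11 on DescriptorProto should resolve to the new "visibility" field;
	// genid.DescriptorProto_Visibility_field_number is the generated constant for it.
	md := (&descriptorpb.DescriptorProto{}).ProtoReflect().Descriptor()
	fd := md.Fields().ByNumber(11)
	fmt.Println(fd.Name(), fd.Number()) // visibility 11
}
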
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
index 41c1f74e..bdad12a9 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -11,6 +11,7 @@ import (
 
 	"google.golang.org/protobuf/encoding/protowire"
 	"google.golang.org/protobuf/internal/encoding/messageset"
+	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/internal/order"
 	"google.golang.org/protobuf/reflect/protoreflect"
 	piface "google.golang.org/protobuf/runtime/protoiface"
@@ -80,7 +81,7 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf
 		// permit us to skip over definitely-unset fields at marshal time.
 
 		var hasPresence bool
-		hasPresence, cf.isLazy = usePresenceForField(si, fd)
+		hasPresence, cf.isLazy = filedesc.UsePresenceForField(fd)
 
 		if hasPresence {
 			cf.presenceIndex, mi.presenceSize = presenceIndex(mi.Desc, fd)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
index dd55e8e0..5a439daa 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -11,6 +11,7 @@ import (
 	"strings"
 	"sync/atomic"
 
+	"google.golang.org/protobuf/internal/filedesc"
 	"google.golang.org/protobuf/reflect/protoreflect"
 )
 
@@ -53,7 +54,7 @@ func opaqueInitHook(mi *MessageInfo) bool {
 		fd := fds.Get(i)
 		fs := si.fieldsByNumber[fd.Number()]
 		var fi fieldInfo
-		usePresence, _ := usePresenceForField(si, fd)
+		usePresence, _ := filedesc.UsePresenceForField(fd)
 
 		switch {
 		case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
@@ -343,17 +344,15 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
 			if p.IsNil() {
 				return false
 			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
 				return false
 			}
-			rv := sp.AsValueOf(fs.Type.Elem())
 			return rv.Elem().Len() > 0
 		},
 		clear: func(p pointer) {
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if !sp.IsNil() {
-				rv := sp.AsValueOf(fs.Type.Elem())
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if !rv.IsNil() {
 				rv.Elem().Set(reflect.Zero(rv.Type().Elem()))
 			}
 		},
@@ -361,11 +360,10 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
 			if p.IsNil() {
 				return conv.Zero()
 			}
-			sp := p.Apply(fieldOffset).AtomicGetPointer()
-			if sp.IsNil() {
+			rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
+			if rv.IsNil() {
 				return conv.Zero()
 			}
-			rv := sp.AsValueOf(fs.Type.Elem())
 			if rv.Elem().Len() == 0 {
 				return conv.Zero()
 			}
@@ -598,30 +596,3 @@ func (mi *MessageInfo) clearPresent(p pointer, index uint32) {
 func (mi *MessageInfo) present(p pointer, index uint32) bool {
 	return p.Apply(mi.presenceOffset).PresenceInfo().Present(index)
 }
-
-// usePresenceForField implements the somewhat intricate logic of when
-// the presence bitmap is used for a field.  The main logic is that a
-// field that is optional or that can be lazy will use the presence
-// bit, but for proto2, also maps have a presence bit. It also records
-// if the field can ever be lazy, which is true if we have a
-// lazyOffset and the field is a message or a slice of messages. A
-// field that is lazy will always need a presence bit.  Oneofs are not
-// lazy and do not use presence, unless they are a synthetic oneof,
-// which is a proto3 optional field. For proto3 optionals, we use the
-// presence and they can also be lazy when applicable (a message).
-func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (usePresence, canBeLazy bool) {
-	hasLazyField := fd.(interface{ IsLazy() bool }).IsLazy()
-
-	// Non-oneof scalar fields with explicit field presence use the presence array.
-	usesPresenceArray := fd.HasPresence() && fd.Message() == nil && (fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
-	switch {
-	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
-		return false, false
-	case fd.IsMap():
-		return false, false
-	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
-		return hasLazyField, hasLazyField
-	default:
-		return usesPresenceArray || (hasLazyField && fd.HasPresence()), false
-	}
-}
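
The helper removed above is not dropped: both call sites in this file now go through filedesc.UsePresenceForField, imported at the top of this diff. For reference, a self-contained sketch of the decision described in the deleted doc comment; the function and parameter names here are illustrative, not the relocated API:

package presencesketch

import "google.golang.org/protobuf/reflect/protoreflect"

// decidePresence mirrors the logic of the removed helper: real (non-synthetic)
// oneof members and map fields never use the presence bitmap; message and
// group fields use it only when they can be lazy; all other fields use it when
// they have explicit presence (or are lazy with presence).
func decidePresence(fd protoreflect.FieldDescriptor, canBeLazyField bool) (usePresence, canBeLazy bool) {
	switch {
	case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
		return false, false
	case fd.IsMap():
		return false, false
	case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
		return canBeLazyField, canBeLazyField
	default:
		explicit := fd.HasPresence() && fd.Message() == nil &&
			(fd.ContainingOneof() == nil || fd.ContainingOneof().IsSynthetic())
		return explicit || (canBeLazyField && fd.HasPresence()), false
	}
}
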
diff --git a/vendor/google.golang.org/protobuf/internal/impl/presence.go b/vendor/google.golang.org/protobuf/internal/impl/presence.go
index 914cb1de..443afe81 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/presence.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/presence.go
@@ -32,9 +32,6 @@ func (p presence) toElem(num uint32) (ret *uint32) {
 
 // Present checks for the presence of a specific field number in a presence set.
 func (p presence) Present(num uint32) bool {
-	if p.P == nil {
-		return false
-	}
 	return Export{}.Present(p.toElem(num), num)
 }
 
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index aac1cb18..a53364c5 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
 const (
 	Major      = 1
 	Minor      = 36
-	Patch      = 6
+	Patch      = 7
 	PreRelease = ""
 )
 
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
index a4a0a297..730331e6 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source_gen.go
@@ -21,6 +21,8 @@ func (p *SourcePath) appendFileDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "public_dependency", nil)
 	case 11:
 		b = p.appendRepeatedField(b, "weak_dependency", nil)
+	case 15:
+		b = p.appendRepeatedField(b, "option_dependency", nil)
 	case 4:
 		b = p.appendRepeatedField(b, "message_type", (*SourcePath).appendDescriptorProto)
 	case 5:
@@ -66,6 +68,8 @@ func (p *SourcePath) appendDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendDescriptorProto_ReservedRange)
 	case 10:
 		b = p.appendRepeatedField(b, "reserved_name", nil)
+	case 11:
+		b = p.appendSingularField(b, "visibility", nil)
 	}
 	return b
 }
@@ -85,6 +89,8 @@ func (p *SourcePath) appendEnumDescriptorProto(b []byte) []byte {
 		b = p.appendRepeatedField(b, "reserved_range", (*SourcePath).appendEnumDescriptorProto_EnumReservedRange)
 	case 5:
 		b = p.appendRepeatedField(b, "reserved_name", nil)
+	case 6:
+		b = p.appendSingularField(b, "visibility", nil)
 	}
 	return b
 }
@@ -400,6 +406,8 @@ func (p *SourcePath) appendFeatureSet(b []byte) []byte {
 		b = p.appendSingularField(b, "json_format", nil)
 	case 7:
 		b = p.appendSingularField(b, "enforce_naming_style", nil)
+	case 8:
+		b = p.appendSingularField(b, "default_symbol_visibility", nil)
 	}
 	return b
 }
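
These new switch arms feed protoreflect.SourcePath rendering, so the added fields become addressable in SourceCodeInfo paths. A small illustration, again assuming the vendored protobuf version from this patch:

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
)

func main() {
	// 4 = FileDescriptorProto.message_type (repeated), index 0, 11 = DescriptorProto.visibility.
	fmt.Println(protoreflect.SourcePath{4, 0, 11}.String()) // .message_type[0].visibility
	// 5 = FileDescriptorProto.enum_type (repeated), index 0, 6 = EnumDescriptorProto.visibility.
	fmt.Println(protoreflect.SourcePath{5, 0, 6}.String()) // .enum_type[0].visibility
}
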
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index 7fe280f1..6843b0be 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -151,6 +151,70 @@ func (Edition) EnumDescriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{0}
 }
 
+// Describes the 'visibility' of a symbol with respect to the proto import
+// system. Symbols can only be imported when the visibility rules do not
+// prevent it (e.g. local symbols cannot be imported). Visibility modifiers
+// can only be set on `message` and `enum`, as they are the only types that
+// can be referenced from other files.
+type SymbolVisibility int32
+
+const (
+	SymbolVisibility_VISIBILITY_UNSET  SymbolVisibility = 0
+	SymbolVisibility_VISIBILITY_LOCAL  SymbolVisibility = 1
+	SymbolVisibility_VISIBILITY_EXPORT SymbolVisibility = 2
+)
+
+// Enum value maps for SymbolVisibility.
+var (
+	SymbolVisibility_name = map[int32]string{
+		0: "VISIBILITY_UNSET",
+		1: "VISIBILITY_LOCAL",
+		2: "VISIBILITY_EXPORT",
+	}
+	SymbolVisibility_value = map[string]int32{
+		"VISIBILITY_UNSET":  0,
+		"VISIBILITY_LOCAL":  1,
+		"VISIBILITY_EXPORT": 2,
+	}
+)
+
+func (x SymbolVisibility) Enum() *SymbolVisibility {
+	p := new(SymbolVisibility)
+	*p = x
+	return p
+}
+
+func (x SymbolVisibility) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (SymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+}
+
+func (SymbolVisibility) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+}
+
+func (x SymbolVisibility) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *SymbolVisibility) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = SymbolVisibility(num)
+	return nil
+}
+
+// Deprecated: Use SymbolVisibility.Descriptor instead.
+func (SymbolVisibility) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{1}
+}
+
 // The verification state of the extension range.
 type ExtensionRangeOptions_VerificationState int32
 
@@ -183,11 +247,11 @@ func (x ExtensionRangeOptions_VerificationState) String() string {
 }
 
 func (ExtensionRangeOptions_VerificationState) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[1].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
 }
 
 func (ExtensionRangeOptions_VerificationState) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[1]
+	return &file_google_protobuf_descriptor_proto_enumTypes[2]
 }
 
 func (x ExtensionRangeOptions_VerificationState) Number() protoreflect.EnumNumber {
@@ -299,11 +363,11 @@ func (x FieldDescriptorProto_Type) String() string {
 }
 
 func (FieldDescriptorProto_Type) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[2].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
 }
 
 func (FieldDescriptorProto_Type) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[2]
+	return &file_google_protobuf_descriptor_proto_enumTypes[3]
 }
 
 func (x FieldDescriptorProto_Type) Number() protoreflect.EnumNumber {
@@ -362,11 +426,11 @@ func (x FieldDescriptorProto_Label) String() string {
 }
 
 func (FieldDescriptorProto_Label) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[3].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
 }
 
 func (FieldDescriptorProto_Label) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[3]
+	return &file_google_protobuf_descriptor_proto_enumTypes[4]
 }
 
 func (x FieldDescriptorProto_Label) Number() protoreflect.EnumNumber {
@@ -423,11 +487,11 @@ func (x FileOptions_OptimizeMode) String() string {
 }
 
 func (FileOptions_OptimizeMode) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[4].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
 }
 
 func (FileOptions_OptimizeMode) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[4]
+	return &file_google_protobuf_descriptor_proto_enumTypes[5]
 }
 
 func (x FileOptions_OptimizeMode) Number() protoreflect.EnumNumber {
@@ -489,11 +553,11 @@ func (x FieldOptions_CType) String() string {
 }
 
 func (FieldOptions_CType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[5].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
 }
 
 func (FieldOptions_CType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[5]
+	return &file_google_protobuf_descriptor_proto_enumTypes[6]
 }
 
 func (x FieldOptions_CType) Number() protoreflect.EnumNumber {
@@ -551,11 +615,11 @@ func (x FieldOptions_JSType) String() string {
 }
 
 func (FieldOptions_JSType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[6].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
 }
 
 func (FieldOptions_JSType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[6]
+	return &file_google_protobuf_descriptor_proto_enumTypes[7]
 }
 
 func (x FieldOptions_JSType) Number() protoreflect.EnumNumber {
@@ -611,11 +675,11 @@ func (x FieldOptions_OptionRetention) String() string {
 }
 
 func (FieldOptions_OptionRetention) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[7].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
 }
 
 func (FieldOptions_OptionRetention) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[7]
+	return &file_google_protobuf_descriptor_proto_enumTypes[8]
 }
 
 func (x FieldOptions_OptionRetention) Number() protoreflect.EnumNumber {
@@ -694,11 +758,11 @@ func (x FieldOptions_OptionTargetType) String() string {
 }
 
 func (FieldOptions_OptionTargetType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[8].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
 }
 
 func (FieldOptions_OptionTargetType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[8]
+	return &file_google_protobuf_descriptor_proto_enumTypes[9]
 }
 
 func (x FieldOptions_OptionTargetType) Number() protoreflect.EnumNumber {
@@ -756,11 +820,11 @@ func (x MethodOptions_IdempotencyLevel) String() string {
 }
 
 func (MethodOptions_IdempotencyLevel) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[9].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
 }
 
 func (MethodOptions_IdempotencyLevel) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[9]
+	return &file_google_protobuf_descriptor_proto_enumTypes[10]
 }
 
 func (x MethodOptions_IdempotencyLevel) Number() protoreflect.EnumNumber {
@@ -818,11 +882,11 @@ func (x FeatureSet_FieldPresence) String() string {
 }
 
 func (FeatureSet_FieldPresence) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[10].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
 }
 
 func (FeatureSet_FieldPresence) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[10]
+	return &file_google_protobuf_descriptor_proto_enumTypes[11]
 }
 
 func (x FeatureSet_FieldPresence) Number() protoreflect.EnumNumber {
@@ -877,11 +941,11 @@ func (x FeatureSet_EnumType) String() string {
 }
 
 func (FeatureSet_EnumType) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[11].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
 }
 
 func (FeatureSet_EnumType) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[11]
+	return &file_google_protobuf_descriptor_proto_enumTypes[12]
 }
 
 func (x FeatureSet_EnumType) Number() protoreflect.EnumNumber {
@@ -936,11 +1000,11 @@ func (x FeatureSet_RepeatedFieldEncoding) String() string {
 }
 
 func (FeatureSet_RepeatedFieldEncoding) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[12].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
 }
 
 func (FeatureSet_RepeatedFieldEncoding) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[12]
+	return &file_google_protobuf_descriptor_proto_enumTypes[13]
 }
 
 func (x FeatureSet_RepeatedFieldEncoding) Number() protoreflect.EnumNumber {
@@ -995,11 +1059,11 @@ func (x FeatureSet_Utf8Validation) String() string {
 }
 
 func (FeatureSet_Utf8Validation) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[13].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
 }
 
 func (FeatureSet_Utf8Validation) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[13]
+	return &file_google_protobuf_descriptor_proto_enumTypes[14]
 }
 
 func (x FeatureSet_Utf8Validation) Number() protoreflect.EnumNumber {
@@ -1054,11 +1118,11 @@ func (x FeatureSet_MessageEncoding) String() string {
 }
 
 func (FeatureSet_MessageEncoding) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[14].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
 }
 
 func (FeatureSet_MessageEncoding) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[14]
+	return &file_google_protobuf_descriptor_proto_enumTypes[15]
 }
 
 func (x FeatureSet_MessageEncoding) Number() protoreflect.EnumNumber {
@@ -1113,11 +1177,11 @@ func (x FeatureSet_JsonFormat) String() string {
 }
 
 func (FeatureSet_JsonFormat) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[15].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
 }
 
 func (FeatureSet_JsonFormat) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[15]
+	return &file_google_protobuf_descriptor_proto_enumTypes[16]
 }
 
 func (x FeatureSet_JsonFormat) Number() protoreflect.EnumNumber {
@@ -1172,11 +1236,11 @@ func (x FeatureSet_EnforceNamingStyle) String() string {
 }
 
 func (FeatureSet_EnforceNamingStyle) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[16].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
 }
 
 func (FeatureSet_EnforceNamingStyle) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[16]
+	return &file_google_protobuf_descriptor_proto_enumTypes[17]
 }
 
 func (x FeatureSet_EnforceNamingStyle) Number() protoreflect.EnumNumber {
@@ -1198,6 +1262,77 @@ func (FeatureSet_EnforceNamingStyle) EnumDescriptor() ([]byte, []int) {
 	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 6}
 }
 
+type FeatureSet_VisibilityFeature_DefaultSymbolVisibility int32
+
+const (
+	FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 0
+	// Default pre-EDITION_2024: all symbols with UNSET visibility are exported.
+	FeatureSet_VisibilityFeature_EXPORT_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 1
+	// All top-level symbols default to export, nested default to local.
+	FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 2
+	// All symbols default to local.
+	FeatureSet_VisibilityFeature_LOCAL_ALL FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 3
+	// All symbols are local by default; nested types cannot be exported
+	// (with a special-case caveat for message { enum {} reserved 1 to max; }).
+	// This is the recommended setting for new protos.
+	FeatureSet_VisibilityFeature_STRICT FeatureSet_VisibilityFeature_DefaultSymbolVisibility = 4
+)
+
+// Enum value maps for FeatureSet_VisibilityFeature_DefaultSymbolVisibility.
+var (
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_name = map[int32]string{
+		0: "DEFAULT_SYMBOL_VISIBILITY_UNKNOWN",
+		1: "EXPORT_ALL",
+		2: "EXPORT_TOP_LEVEL",
+		3: "LOCAL_ALL",
+		4: "STRICT",
+	}
+	FeatureSet_VisibilityFeature_DefaultSymbolVisibility_value = map[string]int32{
+		"DEFAULT_SYMBOL_VISIBILITY_UNKNOWN": 0,
+		"EXPORT_ALL":                        1,
+		"EXPORT_TOP_LEVEL":                  2,
+		"LOCAL_ALL":                         3,
+		"STRICT":                            4,
+	}
+)
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Enum() *FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+	p := new(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)
+	*p = x
+	return p
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Descriptor() protoreflect.EnumDescriptor {
+	return file_google_protobuf_descriptor_proto_enumTypes[18].Descriptor()
+}
+
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Type() protoreflect.EnumType {
+	return &file_google_protobuf_descriptor_proto_enumTypes[18]
+}
+
+func (x FeatureSet_VisibilityFeature_DefaultSymbolVisibility) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Do not use.
+func (x *FeatureSet_VisibilityFeature_DefaultSymbolVisibility) UnmarshalJSON(b []byte) error {
+	num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)
+	if err != nil {
+		return err
+	}
+	*x = FeatureSet_VisibilityFeature_DefaultSymbolVisibility(num)
+	return nil
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature_DefaultSymbolVisibility.Descriptor instead.
+func (FeatureSet_VisibilityFeature_DefaultSymbolVisibility) EnumDescriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0, 0}
+}
+
 // Represents the identified object's effect on the element in the original
 // .proto file.
 type GeneratedCodeInfo_Annotation_Semantic int32
@@ -1236,11 +1371,11 @@ func (x GeneratedCodeInfo_Annotation_Semantic) String() string {
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Descriptor() protoreflect.EnumDescriptor {
-	return file_google_protobuf_descriptor_proto_enumTypes[17].Descriptor()
+	return file_google_protobuf_descriptor_proto_enumTypes[19].Descriptor()
 }
 
 func (GeneratedCodeInfo_Annotation_Semantic) Type() protoreflect.EnumType {
-	return &file_google_protobuf_descriptor_proto_enumTypes[17]
+	return &file_google_protobuf_descriptor_proto_enumTypes[19]
 }
 
 func (x GeneratedCodeInfo_Annotation_Semantic) Number() protoreflect.EnumNumber {
@@ -1321,6 +1456,9 @@ type FileDescriptorProto struct {
 	// Indexes of the weak imported files in the dependency list.
 	// For Google-internal migration only. Do not use.
 	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// Names of files imported by this file purely for the purpose of providing
+	// option extensions. These are excluded from the dependency list above.
+	OptionDependency []string `protobuf:"bytes,15,rep,name=option_dependency,json=optionDependency" json:"option_dependency,omitempty"`
 	// All top-level definitions in this file.
 	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
 	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
@@ -1414,6 +1552,13 @@ func (x *FileDescriptorProto) GetWeakDependency() []int32 {
 	return nil
 }
 
+func (x *FileDescriptorProto) GetOptionDependency() []string {
+	if x != nil {
+		return x.OptionDependency
+	}
+	return nil
+}
+
 func (x *FileDescriptorProto) GetMessageType() []*DescriptorProto {
 	if x != nil {
 		return x.MessageType
@@ -1484,7 +1629,9 @@ type DescriptorProto struct {
 	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved field names, which may not be used by fields in the same message.
 	// A given name may only be reserved once.
-	ReservedName  []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	// Support for `export` and `local` keywords on enums.
+	Visibility    *SymbolVisibility `protobuf:"varint,11,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
 }
@@ -1589,6 +1736,13 @@ func (x *DescriptorProto) GetReservedName() []string {
 	return nil
 }
 
+func (x *DescriptorProto) GetVisibility() SymbolVisibility {
+	if x != nil && x.Visibility != nil {
+		return *x.Visibility
+	}
+	return SymbolVisibility_VISIBILITY_UNSET
+}
+
 type ExtensionRangeOptions struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
 	// The parser stores options it doesn't recognize here. See above.
@@ -1901,7 +2055,9 @@ type EnumDescriptorProto struct {
 	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
 	// Reserved enum value names, which may not be reused. A given name may only
 	// be reserved once.
-	ReservedName  []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	// Support for `export` and `local` keywords on enums.
+	Visibility    *SymbolVisibility `protobuf:"varint,6,opt,name=visibility,enum=google.protobuf.SymbolVisibility" json:"visibility,omitempty"`
 	unknownFields protoimpl.UnknownFields
 	sizeCache     protoimpl.SizeCache
 }
@@ -1971,6 +2127,13 @@ func (x *EnumDescriptorProto) GetReservedName() []string {
 	return nil
 }
 
+func (x *EnumDescriptorProto) GetVisibility() SymbolVisibility {
+	if x != nil && x.Visibility != nil {
+		return *x.Visibility
+	}
+	return SymbolVisibility_VISIBILITY_UNSET
+}
+
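
With the SymbolVisibility enum and the Visibility fields plus getters added above, setting and reading a symbol's visibility from Go looks roughly like this (a sketch, not part of the generated file):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	ed := &descriptorpb.EnumDescriptorProto{
		Name:       proto.String("Status"),
		Visibility: descriptorpb.SymbolVisibility_VISIBILITY_LOCAL.Enum(),
	}
	// GetVisibility falls back to VISIBILITY_UNSET when the field is not set.
	fmt.Println(ed.GetVisibility())                                // VISIBILITY_LOCAL
	fmt.Println(new(descriptorpb.DescriptorProto).GetVisibility()) // VISIBILITY_UNSET
}
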
 // Describes a value within an enum.
 type EnumValueDescriptorProto struct {
 	state         protoimpl.MessageState `protogen:"open.v1"`
@@ -3392,17 +3555,18 @@ func (x *UninterpretedOption) GetAggregateValue() string {
 // be designed and implemented to handle this, hopefully before we ever hit a
 // conflict here.
 type FeatureSet struct {
-	state                 protoimpl.MessageState            `protogen:"open.v1"`
-	FieldPresence         *FeatureSet_FieldPresence         `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
-	EnumType              *FeatureSet_EnumType              `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
-	RepeatedFieldEncoding *FeatureSet_RepeatedFieldEncoding `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
-	Utf8Validation        *FeatureSet_Utf8Validation        `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
-	MessageEncoding       *FeatureSet_MessageEncoding       `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
-	JsonFormat            *FeatureSet_JsonFormat            `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
-	EnforceNamingStyle    *FeatureSet_EnforceNamingStyle    `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
-	extensionFields       protoimpl.ExtensionFields
-	unknownFields         protoimpl.UnknownFields
-	sizeCache             protoimpl.SizeCache
+	state                   protoimpl.MessageState                                `protogen:"open.v1"`
+	FieldPresence           *FeatureSet_FieldPresence                             `protobuf:"varint,1,opt,name=field_presence,json=fieldPresence,enum=google.protobuf.FeatureSet_FieldPresence" json:"field_presence,omitempty"`
+	EnumType                *FeatureSet_EnumType                                  `protobuf:"varint,2,opt,name=enum_type,json=enumType,enum=google.protobuf.FeatureSet_EnumType" json:"enum_type,omitempty"`
+	RepeatedFieldEncoding   *FeatureSet_RepeatedFieldEncoding                     `protobuf:"varint,3,opt,name=repeated_field_encoding,json=repeatedFieldEncoding,enum=google.protobuf.FeatureSet_RepeatedFieldEncoding" json:"repeated_field_encoding,omitempty"`
+	Utf8Validation          *FeatureSet_Utf8Validation                            `protobuf:"varint,4,opt,name=utf8_validation,json=utf8Validation,enum=google.protobuf.FeatureSet_Utf8Validation" json:"utf8_validation,omitempty"`
+	MessageEncoding         *FeatureSet_MessageEncoding                           `protobuf:"varint,5,opt,name=message_encoding,json=messageEncoding,enum=google.protobuf.FeatureSet_MessageEncoding" json:"message_encoding,omitempty"`
+	JsonFormat              *FeatureSet_JsonFormat                                `protobuf:"varint,6,opt,name=json_format,json=jsonFormat,enum=google.protobuf.FeatureSet_JsonFormat" json:"json_format,omitempty"`
+	EnforceNamingStyle      *FeatureSet_EnforceNamingStyle                        `protobuf:"varint,7,opt,name=enforce_naming_style,json=enforceNamingStyle,enum=google.protobuf.FeatureSet_EnforceNamingStyle" json:"enforce_naming_style,omitempty"`
+	DefaultSymbolVisibility *FeatureSet_VisibilityFeature_DefaultSymbolVisibility `protobuf:"varint,8,opt,name=default_symbol_visibility,json=defaultSymbolVisibility,enum=google.protobuf.FeatureSet_VisibilityFeature_DefaultSymbolVisibility" json:"default_symbol_visibility,omitempty"`
+	extensionFields         protoimpl.ExtensionFields
+	unknownFields           protoimpl.UnknownFields
+	sizeCache               protoimpl.SizeCache
 }
 
 func (x *FeatureSet) Reset() {
@@ -3484,6 +3648,13 @@ func (x *FeatureSet) GetEnforceNamingStyle() FeatureSet_EnforceNamingStyle {
 	return FeatureSet_ENFORCE_NAMING_STYLE_UNKNOWN
 }
 
+func (x *FeatureSet) GetDefaultSymbolVisibility() FeatureSet_VisibilityFeature_DefaultSymbolVisibility {
+	if x != nil && x.DefaultSymbolVisibility != nil {
+		return *x.DefaultSymbolVisibility
+	}
+	return FeatureSet_VisibilityFeature_DEFAULT_SYMBOL_VISIBILITY_UNKNOWN
+}
+
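
The new FeatureSet field follows the same optional-enum pattern as the existing features; a brief, illustrative sketch of setting and reading it:

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fs := &descriptorpb.FeatureSet{
		DefaultSymbolVisibility: descriptorpb.FeatureSet_VisibilityFeature_EXPORT_TOP_LEVEL.Enum(),
	}
	fmt.Println(fs.GetDefaultSymbolVisibility()) // EXPORT_TOP_LEVEL
}
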
 // A compiled specification for the defaults of a set of features.  These
 // messages are generated from FeatureSet extensions and can be used to seed
 // feature resolution. The resolution with this object becomes a simple search
@@ -4144,6 +4315,42 @@ func (x *UninterpretedOption_NamePart) GetIsExtension() bool {
 	return false
 }
 
+type FeatureSet_VisibilityFeature struct {
+	state         protoimpl.MessageState `protogen:"open.v1"`
+	unknownFields protoimpl.UnknownFields
+	sizeCache     protoimpl.SizeCache
+}
+
+func (x *FeatureSet_VisibilityFeature) Reset() {
+	*x = FeatureSet_VisibilityFeature{}
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+	ms.StoreMessageInfo(mi)
+}
+
+func (x *FeatureSet_VisibilityFeature) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*FeatureSet_VisibilityFeature) ProtoMessage() {}
+
+func (x *FeatureSet_VisibilityFeature) ProtoReflect() protoreflect.Message {
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	if x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use FeatureSet_VisibilityFeature.ProtoReflect.Descriptor instead.
+func (*FeatureSet_VisibilityFeature) Descriptor() ([]byte, []int) {
+	return file_google_protobuf_descriptor_proto_rawDescGZIP(), []int{19, 0}
+}
+
 // A map from every known edition with a unique set of defaults to its
 // defaults. Not all editions may be contained here.  For a given edition,
 // the defaults at the closest matching edition ordered at or before it should
@@ -4161,7 +4368,7 @@ type FeatureSetDefaults_FeatureSetEditionDefault struct {
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) Reset() {
 	*x = FeatureSetDefaults_FeatureSetEditionDefault{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4173,7 +4380,7 @@ func (x *FeatureSetDefaults_FeatureSetEditionDefault) String() string {
 func (*FeatureSetDefaults_FeatureSetEditionDefault) ProtoMessage() {}
 
 func (x *FeatureSetDefaults_FeatureSetEditionDefault) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[30]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4309,7 +4516,7 @@ type SourceCodeInfo_Location struct {
 
 func (x *SourceCodeInfo_Location) Reset() {
 	*x = SourceCodeInfo_Location{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4321,7 +4528,7 @@ func (x *SourceCodeInfo_Location) String() string {
 func (*SourceCodeInfo_Location) ProtoMessage() {}
 
 func (x *SourceCodeInfo_Location) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[31]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4393,7 +4600,7 @@ type GeneratedCodeInfo_Annotation struct {
 
 func (x *GeneratedCodeInfo_Annotation) Reset() {
 	*x = GeneratedCodeInfo_Annotation{}
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
 	ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 	ms.StoreMessageInfo(mi)
 }
@@ -4405,7 +4612,7 @@ func (x *GeneratedCodeInfo_Annotation) String() string {
 func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
 
 func (x *GeneratedCodeInfo_Annotation) ProtoReflect() protoreflect.Message {
-	mi := &file_google_protobuf_descriptor_proto_msgTypes[32]
+	mi := &file_google_protobuf_descriptor_proto_msgTypes[33]
 	if x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -4462,7 +4669,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
 	"\n" +
 	" google/protobuf/descriptor.proto\x12\x0fgoogle.protobuf\"[\n" +
 	"\x11FileDescriptorSet\x128\n" +
-	"\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\x98\x05\n" +
+	"\x04file\x18\x01 \x03(\v2$.google.protobuf.FileDescriptorProtoR\x04file*\f\b\x80\xec\xca\xff\x01\x10\x81\xec\xca\xff\x01\"\xc5\x05\n" +
 	"\x13FileDescriptorProto\x12\x12\n" +
 	"\x04name\x18\x01 \x01(\tR\x04name\x12\x18\n" +
 	"\apackage\x18\x02 \x01(\tR\apackage\x12\x1e\n" +
@@ -4471,7 +4678,8 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
 	"dependency\x12+\n" +
 	"\x11public_dependency\x18\n" +
 	" \x03(\x05R\x10publicDependency\x12'\n" +
-	"\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12C\n" +
+	"\x0fweak_dependency\x18\v \x03(\x05R\x0eweakDependency\x12+\n" +
+	"\x11option_dependency\x18\x0f \x03(\tR\x10optionDependency\x12C\n" +
 	"\fmessage_type\x18\x04 \x03(\v2 .google.protobuf.DescriptorProtoR\vmessageType\x12A\n" +
 	"\tenum_type\x18\x05 \x03(\v2$.google.protobuf.EnumDescriptorProtoR\benumType\x12A\n" +
 	"\aservice\x18\x06 \x03(\v2'.google.protobuf.ServiceDescriptorProtoR\aservice\x12C\n" +
@@ -4479,7 +4687,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
 	"\aoptions\x18\b \x01(\v2\x1c.google.protobuf.FileOptionsR\aoptions\x12I\n" +
 	"\x10source_code_info\x18\t \x01(\v2\x1f.google.protobuf.SourceCodeInfoR\x0esourceCodeInfo\x12\x16\n" +
 	"\x06syntax\x18\f \x01(\tR\x06syntax\x122\n" +
-	"\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xb9\x06\n" +
+	"\aedition\x18\x0e \x01(\x0e2\x18.google.protobuf.EditionR\aedition\"\xfc\x06\n" +
 	"\x0fDescriptorProto\x12\x12\n" +
 	"\x04name\x18\x01 \x01(\tR\x04name\x12;\n" +
 	"\x05field\x18\x02 \x03(\v2%.google.protobuf.FieldDescriptorProtoR\x05field\x12C\n" +
@@ -4493,7 +4701,10 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
 	"\aoptions\x18\a \x01(\v2\x1f.google.protobuf.MessageOptionsR\aoptions\x12U\n" +
 	"\x0ereserved_range\x18\t \x03(\v2..google.protobuf.DescriptorProto.ReservedRangeR\rreservedRange\x12#\n" +
 	"\rreserved_name\x18\n" +
-	" \x03(\tR\freservedName\x1az\n" +
+	" \x03(\tR\freservedName\x12A\n" +
+	"\n" +
+	"visibility\x18\v \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+	"visibility\x1az\n" +
 	"\x0eExtensionRange\x12\x14\n" +
 	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
 	"\x03end\x18\x02 \x01(\x05R\x03end\x12@\n" +
@@ -4562,13 +4773,16 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
 	"\x0eLABEL_REQUIRED\x10\x02\"c\n" +
 	"\x14OneofDescriptorProto\x12\x12\n" +
 	"\x04name\x18\x01 \x01(\tR\x04name\x127\n" +
-	"\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xe3\x02\n" +
+	"\aoptions\x18\x02 \x01(\v2\x1d.google.protobuf.OneofOptionsR\aoptions\"\xa6\x03\n" +
 	"\x13EnumDescriptorProto\x12\x12\n" +
 	"\x04name\x18\x01 \x01(\tR\x04name\x12?\n" +
 	"\x05value\x18\x02 \x03(\v2).google.protobuf.EnumValueDescriptorProtoR\x05value\x126\n" +
 	"\aoptions\x18\x03 \x01(\v2\x1c.google.protobuf.EnumOptionsR\aoptions\x12]\n" +
 	"\x0ereserved_range\x18\x04 \x03(\v26.google.protobuf.EnumDescriptorProto.EnumReservedRangeR\rreservedRange\x12#\n" +
-	"\rreserved_name\x18\x05 \x03(\tR\freservedName\x1a;\n" +
+	"\rreserved_name\x18\x05 \x03(\tR\freservedName\x12A\n" +
+	"\n" +
+	"visibility\x18\x06 \x01(\x0e2!.google.protobuf.SymbolVisibilityR\n" +
+	"visibility\x1a;\n" +
 	"\x11EnumReservedRange\x12\x14\n" +
 	"\x05start\x18\x01 \x01(\x05R\x05start\x12\x10\n" +
 	"\x03end\x18\x02 \x01(\x05R\x03end\"\x83\x01\n" +
@@ -4728,7 +4942,7 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
 	"\x0faggregate_value\x18\b \x01(\tR\x0eaggregateValue\x1aJ\n" +
 	"\bNamePart\x12\x1b\n" +
 	"\tname_part\x18\x01 \x02(\tR\bnamePart\x12!\n" +
-	"\fis_extension\x18\x02 \x02(\bR\visExtension\"\xae\f\n" +
+	"\fis_extension\x18\x02 \x02(\bR\visExtension\"\x8e\x0f\n" +
 	"\n" +
 	"FeatureSet\x12\x91\x01\n" +
 	"\x0efield_presence\x18\x01 \x01(\x0e2).google.protobuf.FeatureSet.FieldPresenceB?\x88\x01\x01\x98\x01\x04\x98\x01\x01\xa2\x01\r\x12\bEXPLICIT\x18\x84\a\xa2\x01\r\x12\bIMPLICIT\x18\xe7\a\xa2\x01\r\x12\bEXPLICIT\x18\xe8\a\xb2\x01\x03\b\xe8\aR\rfieldPresence\x12l\n" +
@@ -4739,7 +4953,18 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
 	"\vjson_format\x18\x06 \x01(\x0e2&.google.protobuf.FeatureSet.JsonFormatB9\x88\x01\x01\x98\x01\x03\x98\x01\x06\x98\x01\x01\xa2\x01\x17\x12\x12LEGACY_BEST_EFFORT\x18\x84\a\xa2\x01\n" +
 	"\x12\x05ALLOW\x18\xe7\a\xb2\x01\x03\b\xe8\aR\n" +
 	"jsonFormat\x12\xab\x01\n" +
-	"\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\"\\\n" +
+	"\x14enforce_naming_style\x18\a \x01(\x0e2..google.protobuf.FeatureSet.EnforceNamingStyleBI\x88\x01\x02\x98\x01\x01\x98\x01\x02\x98\x01\x03\x98\x01\x04\x98\x01\x05\x98\x01\x06\x98\x01\a\x98\x01\b\x98\x01\t\xa2\x01\x11\x12\fSTYLE_LEGACY\x18\x84\a\xa2\x01\x0e\x12\tSTYLE2024\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x12enforceNamingStyle\x12\xb9\x01\n" +
+	"\x19default_symbol_visibility\x18\b \x01(\x0e2E.google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibilityB6\x88\x01\x02\x98\x01\x01\xa2\x01\x0f\x12\n" +
+	"EXPORT_ALL\x18\x84\a\xa2\x01\x15\x12\x10EXPORT_TOP_LEVEL\x18\xe9\a\xb2\x01\x03\b\xe9\aR\x17defaultSymbolVisibility\x1a\xa1\x01\n" +
+	"\x11VisibilityFeature\"\x81\x01\n" +
+	"\x17DefaultSymbolVisibility\x12%\n" +
+	"!DEFAULT_SYMBOL_VISIBILITY_UNKNOWN\x10\x00\x12\x0e\n" +
+	"\n" +
+	"EXPORT_ALL\x10\x01\x12\x14\n" +
+	"\x10EXPORT_TOP_LEVEL\x10\x02\x12\r\n" +
+	"\tLOCAL_ALL\x10\x03\x12\n" +
+	"\n" +
+	"\x06STRICT\x10\x04J\b\b\x01\x10\x80\x80\x80\x80\x02\"\\\n" +
 	"\rFieldPresence\x12\x1a\n" +
 	"\x16FIELD_PRESENCE_UNKNOWN\x10\x00\x12\f\n" +
 	"\bEXPLICIT\x10\x01\x12\f\n" +
@@ -4817,7 +5042,11 @@ const file_google_protobuf_descriptor_proto_rawDesc = "" +
 	"\x17EDITION_99997_TEST_ONLY\x10\x9d\x8d\x06\x12\x1d\n" +
 	"\x17EDITION_99998_TEST_ONLY\x10\x9e\x8d\x06\x12\x1d\n" +
 	"\x17EDITION_99999_TEST_ONLY\x10\x9f\x8d\x06\x12\x13\n" +
-	"\vEDITION_MAX\x10\xff\xff\xff\xff\aB~\n" +
+	"\vEDITION_MAX\x10\xff\xff\xff\xff\a*U\n" +
+	"\x10SymbolVisibility\x12\x14\n" +
+	"\x10VISIBILITY_UNSET\x10\x00\x12\x14\n" +
+	"\x10VISIBILITY_LOCAL\x10\x01\x12\x15\n" +
+	"\x11VISIBILITY_EXPORT\x10\x02B~\n" +
 	"\x13com.google.protobufB\x10DescriptorProtosH\x01Z-google.golang.org/protobuf/types/descriptorpb\xf8\x01\x01\xa2\x02\x03GPB\xaa\x02\x1aGoogle.Protobuf.Reflection"
 
 var (
@@ -4832,145 +5061,151 @@ func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
 	return file_google_protobuf_descriptor_proto_rawDescData
 }
 
-var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 18)
-var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 33)
+var file_google_protobuf_descriptor_proto_enumTypes = make([]protoimpl.EnumInfo, 20)
+var file_google_protobuf_descriptor_proto_msgTypes = make([]protoimpl.MessageInfo, 34)
 var file_google_protobuf_descriptor_proto_goTypes = []any{
-	(Edition)(0), // 0: google.protobuf.Edition
-	(ExtensionRangeOptions_VerificationState)(0),        // 1: google.protobuf.ExtensionRangeOptions.VerificationState
-	(FieldDescriptorProto_Type)(0),                      // 2: google.protobuf.FieldDescriptorProto.Type
-	(FieldDescriptorProto_Label)(0),                     // 3: google.protobuf.FieldDescriptorProto.Label
-	(FileOptions_OptimizeMode)(0),                       // 4: google.protobuf.FileOptions.OptimizeMode
-	(FieldOptions_CType)(0),                             // 5: google.protobuf.FieldOptions.CType
-	(FieldOptions_JSType)(0),                            // 6: google.protobuf.FieldOptions.JSType
-	(FieldOptions_OptionRetention)(0),                   // 7: google.protobuf.FieldOptions.OptionRetention
-	(FieldOptions_OptionTargetType)(0),                  // 8: google.protobuf.FieldOptions.OptionTargetType
-	(MethodOptions_IdempotencyLevel)(0),                 // 9: google.protobuf.MethodOptions.IdempotencyLevel
-	(FeatureSet_FieldPresence)(0),                       // 10: google.protobuf.FeatureSet.FieldPresence
-	(FeatureSet_EnumType)(0),                            // 11: google.protobuf.FeatureSet.EnumType
-	(FeatureSet_RepeatedFieldEncoding)(0),               // 12: google.protobuf.FeatureSet.RepeatedFieldEncoding
-	(FeatureSet_Utf8Validation)(0),                      // 13: google.protobuf.FeatureSet.Utf8Validation
-	(FeatureSet_MessageEncoding)(0),                     // 14: google.protobuf.FeatureSet.MessageEncoding
-	(FeatureSet_JsonFormat)(0),                          // 15: google.protobuf.FeatureSet.JsonFormat
-	(FeatureSet_EnforceNamingStyle)(0),                  // 16: google.protobuf.FeatureSet.EnforceNamingStyle
-	(GeneratedCodeInfo_Annotation_Semantic)(0),          // 17: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	(*FileDescriptorSet)(nil),                           // 18: google.protobuf.FileDescriptorSet
-	(*FileDescriptorProto)(nil),                         // 19: google.protobuf.FileDescriptorProto
-	(*DescriptorProto)(nil),                             // 20: google.protobuf.DescriptorProto
-	(*ExtensionRangeOptions)(nil),                       // 21: google.protobuf.ExtensionRangeOptions
-	(*FieldDescriptorProto)(nil),                        // 22: google.protobuf.FieldDescriptorProto
-	(*OneofDescriptorProto)(nil),                        // 23: google.protobuf.OneofDescriptorProto
-	(*EnumDescriptorProto)(nil),                         // 24: google.protobuf.EnumDescriptorProto
-	(*EnumValueDescriptorProto)(nil),                    // 25: google.protobuf.EnumValueDescriptorProto
-	(*ServiceDescriptorProto)(nil),                      // 26: google.protobuf.ServiceDescriptorProto
-	(*MethodDescriptorProto)(nil),                       // 27: google.protobuf.MethodDescriptorProto
-	(*FileOptions)(nil),                                 // 28: google.protobuf.FileOptions
-	(*MessageOptions)(nil),                              // 29: google.protobuf.MessageOptions
-	(*FieldOptions)(nil),                                // 30: google.protobuf.FieldOptions
-	(*OneofOptions)(nil),                                // 31: google.protobuf.OneofOptions
-	(*EnumOptions)(nil),                                 // 32: google.protobuf.EnumOptions
-	(*EnumValueOptions)(nil),                            // 33: google.protobuf.EnumValueOptions
-	(*ServiceOptions)(nil),                              // 34: google.protobuf.ServiceOptions
-	(*MethodOptions)(nil),                               // 35: google.protobuf.MethodOptions
-	(*UninterpretedOption)(nil),                         // 36: google.protobuf.UninterpretedOption
-	(*FeatureSet)(nil),                                  // 37: google.protobuf.FeatureSet
-	(*FeatureSetDefaults)(nil),                          // 38: google.protobuf.FeatureSetDefaults
-	(*SourceCodeInfo)(nil),                              // 39: google.protobuf.SourceCodeInfo
-	(*GeneratedCodeInfo)(nil),                           // 40: google.protobuf.GeneratedCodeInfo
-	(*DescriptorProto_ExtensionRange)(nil),              // 41: google.protobuf.DescriptorProto.ExtensionRange
-	(*DescriptorProto_ReservedRange)(nil),               // 42: google.protobuf.DescriptorProto.ReservedRange
-	(*ExtensionRangeOptions_Declaration)(nil),           // 43: google.protobuf.ExtensionRangeOptions.Declaration
-	(*EnumDescriptorProto_EnumReservedRange)(nil),       // 44: google.protobuf.EnumDescriptorProto.EnumReservedRange
-	(*FieldOptions_EditionDefault)(nil),                 // 45: google.protobuf.FieldOptions.EditionDefault
-	(*FieldOptions_FeatureSupport)(nil),                 // 46: google.protobuf.FieldOptions.FeatureSupport
-	(*UninterpretedOption_NamePart)(nil),                // 47: google.protobuf.UninterpretedOption.NamePart
-	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil), // 48: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
-	(*SourceCodeInfo_Location)(nil),                     // 49: google.protobuf.SourceCodeInfo.Location
-	(*GeneratedCodeInfo_Annotation)(nil),                // 50: google.protobuf.GeneratedCodeInfo.Annotation
+	(Edition)(0),          // 0: google.protobuf.Edition
+	(SymbolVisibility)(0), // 1: google.protobuf.SymbolVisibility
+	(ExtensionRangeOptions_VerificationState)(0),              // 2: google.protobuf.ExtensionRangeOptions.VerificationState
+	(FieldDescriptorProto_Type)(0),                            // 3: google.protobuf.FieldDescriptorProto.Type
+	(FieldDescriptorProto_Label)(0),                           // 4: google.protobuf.FieldDescriptorProto.Label
+	(FileOptions_OptimizeMode)(0),                             // 5: google.protobuf.FileOptions.OptimizeMode
+	(FieldOptions_CType)(0),                                   // 6: google.protobuf.FieldOptions.CType
+	(FieldOptions_JSType)(0),                                  // 7: google.protobuf.FieldOptions.JSType
+	(FieldOptions_OptionRetention)(0),                         // 8: google.protobuf.FieldOptions.OptionRetention
+	(FieldOptions_OptionTargetType)(0),                        // 9: google.protobuf.FieldOptions.OptionTargetType
+	(MethodOptions_IdempotencyLevel)(0),                       // 10: google.protobuf.MethodOptions.IdempotencyLevel
+	(FeatureSet_FieldPresence)(0),                             // 11: google.protobuf.FeatureSet.FieldPresence
+	(FeatureSet_EnumType)(0),                                  // 12: google.protobuf.FeatureSet.EnumType
+	(FeatureSet_RepeatedFieldEncoding)(0),                     // 13: google.protobuf.FeatureSet.RepeatedFieldEncoding
+	(FeatureSet_Utf8Validation)(0),                            // 14: google.protobuf.FeatureSet.Utf8Validation
+	(FeatureSet_MessageEncoding)(0),                           // 15: google.protobuf.FeatureSet.MessageEncoding
+	(FeatureSet_JsonFormat)(0),                                // 16: google.protobuf.FeatureSet.JsonFormat
+	(FeatureSet_EnforceNamingStyle)(0),                        // 17: google.protobuf.FeatureSet.EnforceNamingStyle
+	(FeatureSet_VisibilityFeature_DefaultSymbolVisibility)(0), // 18: google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+	(GeneratedCodeInfo_Annotation_Semantic)(0),                // 19: google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	(*FileDescriptorSet)(nil),                                 // 20: google.protobuf.FileDescriptorSet
+	(*FileDescriptorProto)(nil),                               // 21: google.protobuf.FileDescriptorProto
+	(*DescriptorProto)(nil),                                   // 22: google.protobuf.DescriptorProto
+	(*ExtensionRangeOptions)(nil),                             // 23: google.protobuf.ExtensionRangeOptions
+	(*FieldDescriptorProto)(nil),                              // 24: google.protobuf.FieldDescriptorProto
+	(*OneofDescriptorProto)(nil),                              // 25: google.protobuf.OneofDescriptorProto
+	(*EnumDescriptorProto)(nil),                               // 26: google.protobuf.EnumDescriptorProto
+	(*EnumValueDescriptorProto)(nil),                          // 27: google.protobuf.EnumValueDescriptorProto
+	(*ServiceDescriptorProto)(nil),                            // 28: google.protobuf.ServiceDescriptorProto
+	(*MethodDescriptorProto)(nil),                             // 29: google.protobuf.MethodDescriptorProto
+	(*FileOptions)(nil),                                       // 30: google.protobuf.FileOptions
+	(*MessageOptions)(nil),                                    // 31: google.protobuf.MessageOptions
+	(*FieldOptions)(nil),                                      // 32: google.protobuf.FieldOptions
+	(*OneofOptions)(nil),                                      // 33: google.protobuf.OneofOptions
+	(*EnumOptions)(nil),                                       // 34: google.protobuf.EnumOptions
+	(*EnumValueOptions)(nil),                                  // 35: google.protobuf.EnumValueOptions
+	(*ServiceOptions)(nil),                                    // 36: google.protobuf.ServiceOptions
+	(*MethodOptions)(nil),                                     // 37: google.protobuf.MethodOptions
+	(*UninterpretedOption)(nil),                               // 38: google.protobuf.UninterpretedOption
+	(*FeatureSet)(nil),                                        // 39: google.protobuf.FeatureSet
+	(*FeatureSetDefaults)(nil),                                // 40: google.protobuf.FeatureSetDefaults
+	(*SourceCodeInfo)(nil),                                    // 41: google.protobuf.SourceCodeInfo
+	(*GeneratedCodeInfo)(nil),                                 // 42: google.protobuf.GeneratedCodeInfo
+	(*DescriptorProto_ExtensionRange)(nil),                    // 43: google.protobuf.DescriptorProto.ExtensionRange
+	(*DescriptorProto_ReservedRange)(nil),                     // 44: google.protobuf.DescriptorProto.ReservedRange
+	(*ExtensionRangeOptions_Declaration)(nil),                 // 45: google.protobuf.ExtensionRangeOptions.Declaration
+	(*EnumDescriptorProto_EnumReservedRange)(nil),             // 46: google.protobuf.EnumDescriptorProto.EnumReservedRange
+	(*FieldOptions_EditionDefault)(nil),                       // 47: google.protobuf.FieldOptions.EditionDefault
+	(*FieldOptions_FeatureSupport)(nil),                       // 48: google.protobuf.FieldOptions.FeatureSupport
+	(*UninterpretedOption_NamePart)(nil),                      // 49: google.protobuf.UninterpretedOption.NamePart
+	(*FeatureSet_VisibilityFeature)(nil),                      // 50: google.protobuf.FeatureSet.VisibilityFeature
+	(*FeatureSetDefaults_FeatureSetEditionDefault)(nil),       // 51: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	(*SourceCodeInfo_Location)(nil),                           // 52: google.protobuf.SourceCodeInfo.Location
+	(*GeneratedCodeInfo_Annotation)(nil),                      // 53: google.protobuf.GeneratedCodeInfo.Annotation
 }
 var file_google_protobuf_descriptor_proto_depIdxs = []int32{
-	19, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
-	20, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
-	24, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
-	26, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
-	22, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
-	28, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
-	39, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
+	21, // 0: google.protobuf.FileDescriptorSet.file:type_name -> google.protobuf.FileDescriptorProto
+	22, // 1: google.protobuf.FileDescriptorProto.message_type:type_name -> google.protobuf.DescriptorProto
+	26, // 2: google.protobuf.FileDescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+	28, // 3: google.protobuf.FileDescriptorProto.service:type_name -> google.protobuf.ServiceDescriptorProto
+	24, // 4: google.protobuf.FileDescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+	30, // 5: google.protobuf.FileDescriptorProto.options:type_name -> google.protobuf.FileOptions
+	41, // 6: google.protobuf.FileDescriptorProto.source_code_info:type_name -> google.protobuf.SourceCodeInfo
 	0,  // 7: google.protobuf.FileDescriptorProto.edition:type_name -> google.protobuf.Edition
-	22, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
-	22, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
-	20, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
-	24, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
-	41, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
-	23, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
-	29, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
-	42, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
-	36, // 16: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	43, // 17: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
-	37, // 18: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
-	1,  // 19: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
-	3,  // 20: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
-	2,  // 21: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
-	30, // 22: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
-	31, // 23: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
-	25, // 24: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
-	32, // 25: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
-	44, // 26: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
-	33, // 27: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
-	27, // 28: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
-	34, // 29: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
-	35, // 30: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
-	4,  // 31: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
-	37, // 32: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
-	36, // 33: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	37, // 34: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
-	36, // 35: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	5,  // 36: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
-	6,  // 37: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
-	7,  // 38: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
-	8,  // 39: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
-	45, // 40: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
-	37, // 41: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
-	46, // 42: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
-	36, // 43: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	37, // 44: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
-	36, // 45: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	37, // 46: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
-	36, // 47: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	37, // 48: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
-	46, // 49: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
-	36, // 50: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	37, // 51: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
-	36, // 52: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	9,  // 53: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
-	37, // 54: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
-	36, // 55: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
-	47, // 56: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
-	10, // 57: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
-	11, // 58: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
-	12, // 59: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
-	13, // 60: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
-	14, // 61: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
-	15, // 62: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
-	16, // 63: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
-	48, // 64: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
-	0,  // 65: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
-	0,  // 66: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
-	49, // 67: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
-	50, // 68: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
-	21, // 69: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
-	0,  // 70: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
-	0,  // 71: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
-	0,  // 72: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
-	0,  // 73: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
-	0,  // 74: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
-	37, // 75: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
-	37, // 76: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
-	17, // 77: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
-	78, // [78:78] is the sub-list for method output_type
-	78, // [78:78] is the sub-list for method input_type
-	78, // [78:78] is the sub-list for extension type_name
-	78, // [78:78] is the sub-list for extension extendee
-	0,  // [0:78] is the sub-list for field type_name
+	24, // 8: google.protobuf.DescriptorProto.field:type_name -> google.protobuf.FieldDescriptorProto
+	24, // 9: google.protobuf.DescriptorProto.extension:type_name -> google.protobuf.FieldDescriptorProto
+	22, // 10: google.protobuf.DescriptorProto.nested_type:type_name -> google.protobuf.DescriptorProto
+	26, // 11: google.protobuf.DescriptorProto.enum_type:type_name -> google.protobuf.EnumDescriptorProto
+	43, // 12: google.protobuf.DescriptorProto.extension_range:type_name -> google.protobuf.DescriptorProto.ExtensionRange
+	25, // 13: google.protobuf.DescriptorProto.oneof_decl:type_name -> google.protobuf.OneofDescriptorProto
+	31, // 14: google.protobuf.DescriptorProto.options:type_name -> google.protobuf.MessageOptions
+	44, // 15: google.protobuf.DescriptorProto.reserved_range:type_name -> google.protobuf.DescriptorProto.ReservedRange
+	1,  // 16: google.protobuf.DescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+	38, // 17: google.protobuf.ExtensionRangeOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	45, // 18: google.protobuf.ExtensionRangeOptions.declaration:type_name -> google.protobuf.ExtensionRangeOptions.Declaration
+	39, // 19: google.protobuf.ExtensionRangeOptions.features:type_name -> google.protobuf.FeatureSet
+	2,  // 20: google.protobuf.ExtensionRangeOptions.verification:type_name -> google.protobuf.ExtensionRangeOptions.VerificationState
+	4,  // 21: google.protobuf.FieldDescriptorProto.label:type_name -> google.protobuf.FieldDescriptorProto.Label
+	3,  // 22: google.protobuf.FieldDescriptorProto.type:type_name -> google.protobuf.FieldDescriptorProto.Type
+	32, // 23: google.protobuf.FieldDescriptorProto.options:type_name -> google.protobuf.FieldOptions
+	33, // 24: google.protobuf.OneofDescriptorProto.options:type_name -> google.protobuf.OneofOptions
+	27, // 25: google.protobuf.EnumDescriptorProto.value:type_name -> google.protobuf.EnumValueDescriptorProto
+	34, // 26: google.protobuf.EnumDescriptorProto.options:type_name -> google.protobuf.EnumOptions
+	46, // 27: google.protobuf.EnumDescriptorProto.reserved_range:type_name -> google.protobuf.EnumDescriptorProto.EnumReservedRange
+	1,  // 28: google.protobuf.EnumDescriptorProto.visibility:type_name -> google.protobuf.SymbolVisibility
+	35, // 29: google.protobuf.EnumValueDescriptorProto.options:type_name -> google.protobuf.EnumValueOptions
+	29, // 30: google.protobuf.ServiceDescriptorProto.method:type_name -> google.protobuf.MethodDescriptorProto
+	36, // 31: google.protobuf.ServiceDescriptorProto.options:type_name -> google.protobuf.ServiceOptions
+	37, // 32: google.protobuf.MethodDescriptorProto.options:type_name -> google.protobuf.MethodOptions
+	5,  // 33: google.protobuf.FileOptions.optimize_for:type_name -> google.protobuf.FileOptions.OptimizeMode
+	39, // 34: google.protobuf.FileOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 35: google.protobuf.FileOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 36: google.protobuf.MessageOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 37: google.protobuf.MessageOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	6,  // 38: google.protobuf.FieldOptions.ctype:type_name -> google.protobuf.FieldOptions.CType
+	7,  // 39: google.protobuf.FieldOptions.jstype:type_name -> google.protobuf.FieldOptions.JSType
+	8,  // 40: google.protobuf.FieldOptions.retention:type_name -> google.protobuf.FieldOptions.OptionRetention
+	9,  // 41: google.protobuf.FieldOptions.targets:type_name -> google.protobuf.FieldOptions.OptionTargetType
+	47, // 42: google.protobuf.FieldOptions.edition_defaults:type_name -> google.protobuf.FieldOptions.EditionDefault
+	39, // 43: google.protobuf.FieldOptions.features:type_name -> google.protobuf.FeatureSet
+	48, // 44: google.protobuf.FieldOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+	38, // 45: google.protobuf.FieldOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 46: google.protobuf.OneofOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 47: google.protobuf.OneofOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 48: google.protobuf.EnumOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 49: google.protobuf.EnumOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 50: google.protobuf.EnumValueOptions.features:type_name -> google.protobuf.FeatureSet
+	48, // 51: google.protobuf.EnumValueOptions.feature_support:type_name -> google.protobuf.FieldOptions.FeatureSupport
+	38, // 52: google.protobuf.EnumValueOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	39, // 53: google.protobuf.ServiceOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 54: google.protobuf.ServiceOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	10, // 55: google.protobuf.MethodOptions.idempotency_level:type_name -> google.protobuf.MethodOptions.IdempotencyLevel
+	39, // 56: google.protobuf.MethodOptions.features:type_name -> google.protobuf.FeatureSet
+	38, // 57: google.protobuf.MethodOptions.uninterpreted_option:type_name -> google.protobuf.UninterpretedOption
+	49, // 58: google.protobuf.UninterpretedOption.name:type_name -> google.protobuf.UninterpretedOption.NamePart
+	11, // 59: google.protobuf.FeatureSet.field_presence:type_name -> google.protobuf.FeatureSet.FieldPresence
+	12, // 60: google.protobuf.FeatureSet.enum_type:type_name -> google.protobuf.FeatureSet.EnumType
+	13, // 61: google.protobuf.FeatureSet.repeated_field_encoding:type_name -> google.protobuf.FeatureSet.RepeatedFieldEncoding
+	14, // 62: google.protobuf.FeatureSet.utf8_validation:type_name -> google.protobuf.FeatureSet.Utf8Validation
+	15, // 63: google.protobuf.FeatureSet.message_encoding:type_name -> google.protobuf.FeatureSet.MessageEncoding
+	16, // 64: google.protobuf.FeatureSet.json_format:type_name -> google.protobuf.FeatureSet.JsonFormat
+	17, // 65: google.protobuf.FeatureSet.enforce_naming_style:type_name -> google.protobuf.FeatureSet.EnforceNamingStyle
+	18, // 66: google.protobuf.FeatureSet.default_symbol_visibility:type_name -> google.protobuf.FeatureSet.VisibilityFeature.DefaultSymbolVisibility
+	51, // 67: google.protobuf.FeatureSetDefaults.defaults:type_name -> google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault
+	0,  // 68: google.protobuf.FeatureSetDefaults.minimum_edition:type_name -> google.protobuf.Edition
+	0,  // 69: google.protobuf.FeatureSetDefaults.maximum_edition:type_name -> google.protobuf.Edition
+	52, // 70: google.protobuf.SourceCodeInfo.location:type_name -> google.protobuf.SourceCodeInfo.Location
+	53, // 71: google.protobuf.GeneratedCodeInfo.annotation:type_name -> google.protobuf.GeneratedCodeInfo.Annotation
+	23, // 72: google.protobuf.DescriptorProto.ExtensionRange.options:type_name -> google.protobuf.ExtensionRangeOptions
+	0,  // 73: google.protobuf.FieldOptions.EditionDefault.edition:type_name -> google.protobuf.Edition
+	0,  // 74: google.protobuf.FieldOptions.FeatureSupport.edition_introduced:type_name -> google.protobuf.Edition
+	0,  // 75: google.protobuf.FieldOptions.FeatureSupport.edition_deprecated:type_name -> google.protobuf.Edition
+	0,  // 76: google.protobuf.FieldOptions.FeatureSupport.edition_removed:type_name -> google.protobuf.Edition
+	0,  // 77: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.edition:type_name -> google.protobuf.Edition
+	39, // 78: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.overridable_features:type_name -> google.protobuf.FeatureSet
+	39, // 79: google.protobuf.FeatureSetDefaults.FeatureSetEditionDefault.fixed_features:type_name -> google.protobuf.FeatureSet
+	19, // 80: google.protobuf.GeneratedCodeInfo.Annotation.semantic:type_name -> google.protobuf.GeneratedCodeInfo.Annotation.Semantic
+	81, // [81:81] is the sub-list for method output_type
+	81, // [81:81] is the sub-list for method input_type
+	81, // [81:81] is the sub-list for extension type_name
+	81, // [81:81] is the sub-list for extension extendee
+	0,  // [0:81] is the sub-list for field type_name
 }
 
 func init() { file_google_protobuf_descriptor_proto_init() }
@@ -4983,8 +5218,8 @@ func file_google_protobuf_descriptor_proto_init() {
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
-			NumEnums:      18,
-			NumMessages:   33,
+			NumEnums:      20,
+			NumMessages:   34,
 			NumExtensions: 0,
 			NumServices:   0,
 		},
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 92c6fe2c..ccc059bf 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -276,6 +276,9 @@ github.com/fxamacker/cbor/v2
 github.com/fzipp/gocyclo
 # github.com/gabriel-vasile/mimetype v1.4.9
 ## explicit; go 1.23.0
+# github.com/ghodss/yaml v1.0.0
+## explicit
+github.com/ghodss/yaml
 # github.com/ghostiam/protogetter v0.3.9
 ## explicit; go 1.22.0
 github.com/ghostiam/protogetter
@@ -705,6 +708,8 @@ github.com/kisielk/errcheck/errcheck
 # github.com/kkHAIKE/contextcheck v1.1.6
 ## explicit; go 1.23.0
 github.com/kkHAIKE/contextcheck
+# github.com/krishicks/yaml-patch v0.0.10
+## explicit
 # github.com/kulti/thelper v0.6.3
 ## explicit; go 1.18
 github.com/kulti/thelper/pkg/analyzer
@@ -848,7 +853,7 @@ github.com/nunnatsa/ginkgolinter/version
 # github.com/olekukonko/tablewriter v0.0.5
 ## explicit; go 1.12
 github.com/olekukonko/tablewriter
-# github.com/onsi/ginkgo/v2 v2.23.4
+# github.com/onsi/ginkgo/v2 v2.25.3
 ## explicit; go 1.23.0
 github.com/onsi/ginkgo/v2
 github.com/onsi/ginkgo/v2/config
@@ -870,7 +875,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support
 github.com/onsi/ginkgo/v2/internal/testingtproxy
 github.com/onsi/ginkgo/v2/reporters
 github.com/onsi/ginkgo/v2/types
-# github.com/onsi/gomega v1.38.0
+# github.com/onsi/gomega v1.38.2
 ## explicit; go 1.23.0
 github.com/onsi/gomega
 github.com/onsi/gomega/format
@@ -1121,6 +1126,9 @@ github.com/uudashr/iface/identical
 github.com/uudashr/iface/internal/directive
 github.com/uudashr/iface/opaque
 github.com/uudashr/iface/unused
+# github.com/vmware-archive/yaml-patch v0.0.11
+## explicit
+github.com/vmware-archive/yaml-patch
 # github.com/x448/float16 v0.8.4
 ## explicit; go 1.11
 github.com/x448/float16
@@ -1267,7 +1275,7 @@ go.yaml.in/yaml/v2
 # go.yaml.in/yaml/v3 v3.0.4
 ## explicit; go 1.16
 go.yaml.in/yaml/v3
-# golang.org/x/crypto v0.40.0
+# golang.org/x/crypto v0.41.0
 ## explicit; go 1.23.0
 golang.org/x/crypto/cryptobyte
 golang.org/x/crypto/cryptobyte/asn1
@@ -1284,13 +1292,13 @@ golang.org/x/exp/slices
 # golang.org/x/exp/typeparams v0.0.0-20250210185358-939b2ce775ac
 ## explicit; go 1.18
 golang.org/x/exp/typeparams
-# golang.org/x/mod v0.26.0
+# golang.org/x/mod v0.27.0
 ## explicit; go 1.23.0
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/modfile
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.42.0
+# golang.org/x/net v0.43.0
 ## explicit; go 1.23.0
 golang.org/x/net/context
 golang.org/x/net/html
@@ -1313,14 +1321,14 @@ golang.org/x/oauth2/internal
 golang.org/x/sync/errgroup
 golang.org/x/sync/semaphore
 golang.org/x/sync/singleflight
-# golang.org/x/sys v0.34.0
+# golang.org/x/sys v0.35.0
 ## explicit; go 1.23.0
 golang.org/x/sys/cpu
 golang.org/x/sys/plan9
 golang.org/x/sys/unix
 golang.org/x/sys/windows
 golang.org/x/sys/windows/registry
-# golang.org/x/telemetry v0.0.0-20250710130107-8d8967aff50b
+# golang.org/x/telemetry v0.0.0-20250807160809-1a19826ec488
 ## explicit; go 1.23.0
 golang.org/x/telemetry
 golang.org/x/telemetry/counter
@@ -1331,10 +1339,10 @@ golang.org/x/telemetry/internal/crashmonitor
 golang.org/x/telemetry/internal/mmap
 golang.org/x/telemetry/internal/telemetry
 golang.org/x/telemetry/internal/upload
-# golang.org/x/term v0.33.0
+# golang.org/x/term v0.34.0
 ## explicit; go 1.23.0
 golang.org/x/term
-# golang.org/x/text v0.27.0
+# golang.org/x/text v0.28.0
 ## explicit; go 1.23.0
 golang.org/x/text/cases
 golang.org/x/text/encoding
@@ -1369,7 +1377,7 @@ golang.org/x/text/width
 # golang.org/x/time v0.12.0
 ## explicit; go 1.23.0
 golang.org/x/time/rate
-# golang.org/x/tools v0.35.0
+# golang.org/x/tools v0.36.0
 ## explicit; go 1.23.0
 golang.org/x/tools/cover
 golang.org/x/tools/go/analysis
@@ -1562,7 +1570,7 @@ google.golang.org/grpc/serviceconfig
 google.golang.org/grpc/stats
 google.golang.org/grpc/status
 google.golang.org/grpc/tap
-# google.golang.org/protobuf v1.36.6
+# google.golang.org/protobuf v1.36.7
 ## explicit; go 1.22
 google.golang.org/protobuf/encoding/protodelim
 google.golang.org/protobuf/encoding/protojson
@@ -2578,7 +2586,7 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/client/metrics
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/pkg/common/metrics
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client/proto/client
-# sigs.k8s.io/controller-runtime v0.20.4
+# sigs.k8s.io/controller-runtime v0.20.5-0.20250517180713-32e5e9e948a5
 ## explicit; go 1.23.0
 sigs.k8s.io/controller-runtime
 sigs.k8s.io/controller-runtime/pkg/builder
@@ -2596,6 +2604,7 @@ sigs.k8s.io/controller-runtime/pkg/controller/controllerutil
 sigs.k8s.io/controller-runtime/pkg/controller/priorityqueue
 sigs.k8s.io/controller-runtime/pkg/conversion
 sigs.k8s.io/controller-runtime/pkg/envtest
+sigs.k8s.io/controller-runtime/pkg/envtest/komega
 sigs.k8s.io/controller-runtime/pkg/event
 sigs.k8s.io/controller-runtime/pkg/handler
 sigs.k8s.io/controller-runtime/pkg/healthz
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/OWNERS b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/OWNERS
new file mode 100644
index 00000000..45f63b0e
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/OWNERS
@@ -0,0 +1,13 @@
+approvers:
+  - controller-runtime-admins
+  - controller-runtime-maintainers
+  - controller-runtime-approvers
+  - schrej
+  - JoelSpeed
+reviewers:
+  - controller-runtime-admins
+  - controller-runtime-maintainers
+  - controller-runtime-approvers
+  - controller-runtime-reviewers
+  - schrej
+  - JoelSpeed
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/default.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/default.go
new file mode 100644
index 00000000..dad1f551
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/default.go
@@ -0,0 +1,102 @@
+package komega
+
+import (
+	"context"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// defaultK is the Komega used by the package global functions.
+var defaultK = &komega{ctx: context.Background()}
+
+// SetClient sets the client used by the package global functions.
+func SetClient(c client.Client) {
+	defaultK.client = c
+}
+
+// SetContext sets the context used by the package global functions.
+func SetContext(c context.Context) {
+	defaultK.ctx = c
+}
+
+func checkDefaultClient() {
+	if defaultK.client == nil {
+		panic("Default Komega's client is not set. Use SetClient to set it.")
+	}
+}
+
+// Get returns a function that fetches a resource and returns the occurring error.
+// It can be used with gomega.Eventually() like this
+//
+//	deployment := appsv1.Deployment{ ... }
+//	gomega.Eventually(komega.Get(&deployment)).Should(gomega.Succeed())
+//
+// By calling the returned function directly it can also be used with gomega.Expect(komega.Get(...)()).To(...)
+func Get(obj client.Object) func() error {
+	checkDefaultClient()
+	return defaultK.Get(obj)
+}
+
+// List returns a function that lists resources and returns the occurring error.
+// It can be used with gomega.Eventually() like this
+//
+//	deployments := v1.DeploymentList{ ... }
+//	gomega.Eventually(k.List(&deployments)).Should(gomega.Succeed())
+//
+// By calling the returned function directly it can also be used as gomega.Expect(k.List(...)()).To(...)
+func List(list client.ObjectList, opts ...client.ListOption) func() error {
+	checkDefaultClient()
+	return defaultK.List(list, opts...)
+}
+
+// Update returns a function that fetches a resource, applies the provided update function and then updates the resource.
+// It can be used with gomega.Eventually() like this:
+//
+//	deployment := appsv1.Deployment{ ... }
+//	gomega.Eventually(k.Update(&deployment, func() {
+//	  deployment.Spec.Replicas = 3
+//	})).Should(gomega.Succeed())
+//
+// By calling the returned function directly it can also be used as gomega.Expect(k.Update(...)()).To(...)
+func Update(obj client.Object, f func(), opts ...client.UpdateOption) func() error {
+	checkDefaultClient()
+	return defaultK.Update(obj, f, opts...)
+}
+
+// UpdateStatus returns a function that fetches a resource, applies the provided update function and then updates the resource's status.
+// It can be used with gomega.Eventually() like this:
+//
+//	deployment := appsv1.Deployment{ ... }
+//	gomega.Eventually(k.UpdateStatus(&deployment, func() {
+//	  deployment.Status.AvailableReplicas = 1
+//	})).Should(gomega.Succeed())
+//
+// By calling the returned function directly it can also be used as gomega.Expect(k.UpdateStatus(...)()).To(...)
+func UpdateStatus(obj client.Object, f func(), opts ...client.SubResourceUpdateOption) func() error {
+	checkDefaultClient()
+	return defaultK.UpdateStatus(obj, f, opts...)
+}
+
+// Object returns a function that fetches a resource and returns the object.
+// It can be used with gomega.Eventually() like this:
+//
+//	deployment := appsv1.Deployment{ ... }
+//	gomega.Eventually(k.Object(&deployment)).Should(HaveField("Spec.Replicas", gomega.Equal(ptr.To(3))))
+//
+// By calling the returned function directly it can also be used as gomega.Expect(k.Object(...)()).To(...)
+func Object(obj client.Object) func() (client.Object, error) {
+	checkDefaultClient()
+	return defaultK.Object(obj)
+}
+
+// ObjectList returns a function that fetches a resource and returns the object.
+// It can be used with gomega.Eventually() like this:
+//
+//	deployments := appsv1.DeploymentList{ ... }
+//	gomega.Eventually(k.ObjectList(&deployments)).Should(HaveField("Items", HaveLen(1)))
+//
+// By calling the returned function directly it can also be used as gomega.Expect(k.ObjectList(...)()).To(...)
+func ObjectList(list client.ObjectList, opts ...client.ListOption) func() (client.ObjectList, error) {
+	checkDefaultClient()
+	return defaultK.ObjectList(list, opts...)
+}
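
A minimal usage sketch of the package-level helpers above, assuming a fake client in place of the envtest-backed client a real suite would pass to SetClient (the test and object names are illustrative only):

package apis_test

import (
	"context"
	"testing"

	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/envtest/komega"
)

// TestPackageLevelKomega is an illustrative name; a real suite would register
// the envtest client instead of the fake one used here.
func TestPackageLevelKomega(t *testing.T) {
	g := NewWithT(t)

	c := fake.NewClientBuilder().Build()
	komega.SetClient(c)
	komega.SetContext(context.Background())

	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cfg", Namespace: "default"}}
	g.Expect(c.Create(context.Background(), cm)).To(Succeed())

	// Get returns a func() error, so Eventually keeps retrying until the object is readable.
	g.Eventually(komega.Get(cm)).Should(Succeed())

	// ObjectList returns the list itself, so matchers can inspect its Items.
	g.Eventually(komega.ObjectList(&corev1.ConfigMapList{})).Should(HaveField("Items", HaveLen(1)))
}
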
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/equalobject.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/equalobject.go
new file mode 100644
index 00000000..a931c271
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/equalobject.go
@@ -0,0 +1,297 @@
+/*
+Copyright 2022 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package komega
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+
+	"github.com/google/go-cmp/cmp"
+	"github.com/onsi/gomega/format"
+	"github.com/onsi/gomega/types"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// These package variables hold pre-created commonly used options that can be used to reduce the manual work involved in
+// identifying the paths that need to be compared for testing equality between objects.
+var (
+	// IgnoreAutogeneratedMetadata contains the paths for all the metadata fields that are commonly set by the
+	// client and APIServer. This is used as a MatchOption for situations when only user-provided metadata is relevant.
+	IgnoreAutogeneratedMetadata = IgnorePaths{
+		"metadata.uid",
+		"metadata.generation",
+		"metadata.creationTimestamp",
+		"metadata.resourceVersion",
+		"metadata.managedFields",
+		"metadata.deletionGracePeriodSeconds",
+		"metadata.deletionTimestamp",
+		"metadata.selfLink",
+		"metadata.generateName",
+	}
+)
+
+type diffPath struct {
+	types []string
+	json  []string
+}
+
+// equalObjectMatcher is a Gomega matcher used to establish equality between two Kubernetes runtime.Objects.
+type equalObjectMatcher struct {
+	// original holds the object that will be used to Match.
+	original runtime.Object
+
+	// diffPaths contains the paths that differ between two objects.
+	diffPaths []diffPath
+
+	// options holds the options that identify what should and should not be matched.
+	options *EqualObjectOptions
+}
+
+// EqualObject returns a Matcher for the passed Kubernetes runtime.Object with the passed Options. This function can be
+// used as a Gomega Matcher in Gomega Assertions.
+func EqualObject(original runtime.Object, opts ...EqualObjectOption) types.GomegaMatcher {
+	matchOptions := &EqualObjectOptions{}
+	matchOptions = matchOptions.ApplyOptions(opts)
+
+	return &equalObjectMatcher{
+		options:  matchOptions,
+		original: original,
+	}
+}
+
+// Match compares the current object to the passed object and returns true if the objects are the same according to
+// the Matcher and MatchOptions.
+func (m *equalObjectMatcher) Match(actual interface{}) (success bool, err error) {
+	// Nil checks required first here for:
+	//     1) Nil equality which returns true
+	//     2) One object nil which returns an error
+	actualIsNil := reflect.ValueOf(actual).IsNil()
+	originalIsNil := reflect.ValueOf(m.original).IsNil()
+
+	if actualIsNil && originalIsNil {
+		return true, nil
+	}
+	if actualIsNil || originalIsNil {
+		return false, fmt.Errorf("can not compare an object with a nil. original %v , actual %v", m.original, actual)
+	}
+
+	m.diffPaths = m.calculateDiff(actual)
+	return len(m.diffPaths) == 0, nil
+}
+
+// FailureMessage returns a message comparing the full objects after an unexpected failure to match has occurred.
+func (m *equalObjectMatcher) FailureMessage(actual interface{}) (message string) {
+	return fmt.Sprintf("the following fields were expected to match but did not:\n%v\n%s", m.diffPaths,
+		format.Message(actual, "expected to match", m.original))
+}
+
+// NegatedFailureMessage returns a string stating that all fields matched, even though that was not expected.
+func (m *equalObjectMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+	return "it was expected that some fields do not match, but all of them did"
+}
+
+func (d diffPath) String() string {
+	return fmt.Sprintf("(%s/%s)", strings.Join(d.types, "."), strings.Join(d.json, "."))
+}
+
+// diffReporter is a custom recorder for cmp.Diff which records all paths that are
+// different between two objects.
+type diffReporter struct {
+	stack []cmp.PathStep
+
+	diffPaths []diffPath
+}
+
+func (r *diffReporter) PushStep(s cmp.PathStep) {
+	r.stack = append(r.stack, s)
+}
+
+func (r *diffReporter) Report(res cmp.Result) {
+	if !res.Equal() {
+		r.diffPaths = append(r.diffPaths, r.currentPath())
+	}
+}
+
+// currentPath converts the current stack into string representations that match
+// the IgnorePaths and MatchPaths syntax.
+func (r *diffReporter) currentPath() diffPath {
+	p := diffPath{types: []string{""}, json: []string{""}}
+	for si, s := range r.stack[1:] {
+		switch s := s.(type) {
+		case cmp.StructField:
+			p.types = append(p.types, s.String()[1:])
+			// fetch the type information from the parent struct.
+			// Note: si has an offset of 1 compared to r.stack as we loop over r.stack[1:], so we don't need -1
+			field := r.stack[si].Type().Field(s.Index())
+			p.json = append(p.json, strings.Split(field.Tag.Get("json"), ",")[0])
+		case cmp.SliceIndex:
+			key := fmt.Sprintf("[%d]", s.Key())
+			p.types[len(p.types)-1] += key
+			p.json[len(p.json)-1] += key
+		case cmp.MapIndex:
+			key := fmt.Sprintf("%v", s.Key())
+			if strings.ContainsAny(key, ".[]/\\") {
+				key = fmt.Sprintf("[%s]", key)
+				p.types[len(p.types)-1] += key
+				p.json[len(p.json)-1] += key
+			} else {
+				p.types = append(p.types, key)
+				p.json = append(p.json, key)
+			}
+		}
+	}
+	// Empty strings were added as the first element. If they're still empty, remove them again.
+	if len(p.json) > 0 && len(p.json[0]) == 0 {
+		p.json = p.json[1:]
+		p.types = p.types[1:]
+	}
+	return p
+}
+
+func (r *diffReporter) PopStep() {
+	r.stack = r.stack[:len(r.stack)-1]
+}
+
+// calculateDiff calculates the difference between two objects and returns the
+// paths of the fields that do not match.
+func (m *equalObjectMatcher) calculateDiff(actual interface{}) []diffPath {
+	var original interface{} = m.original
+	// Remove the wrapping Object from unstructured.Unstructured to make comparison behave similar to
+	// regular objects.
+	if u, isUnstructured := actual.(runtime.Unstructured); isUnstructured {
+		actual = u.UnstructuredContent()
+	}
+	if u, ok := m.original.(runtime.Unstructured); ok {
+		original = u.UnstructuredContent()
+	}
+	r := diffReporter{}
+	cmp.Diff(original, actual, cmp.Reporter(&r))
+	return filterDiffPaths(*m.options, r.diffPaths)
+}
+
+// filterDiffPaths filters the diff paths using the paths in EqualObjectOptions.
+func filterDiffPaths(opts EqualObjectOptions, paths []diffPath) []diffPath {
+	result := []diffPath{}
+
+	for _, p := range paths {
+		if len(opts.matchPaths) > 0 && !hasAnyPathPrefix(p, opts.matchPaths) {
+			continue
+		}
+		if hasAnyPathPrefix(p, opts.ignorePaths) {
+			continue
+		}
+
+		result = append(result, p)
+	}
+
+	return result
+}
+
+// hasPathPrefix compares the segments of a path.
+func hasPathPrefix(path []string, prefix []string) bool {
+	for i, p := range prefix {
+		if i >= len(path) {
+			return false
+		}
+		// return false if a segment doesn't match
+		if path[i] != p && (i < len(prefix)-1 || !segmentHasPrefix(path[i], p)) {
+			return false
+		}
+	}
+	return true
+}
+
+func segmentHasPrefix(s, prefix string) bool {
+	return len(s) >= len(prefix) && s[0:len(prefix)] == prefix &&
+		// if it is a prefix match, make sure the next character is a [ for array/map access
+		(len(s) == len(prefix) || s[len(prefix)] == '[')
+}
+
+// hasAnyPathPrefix returns true if path matches any of the path prefixes.
+// It respects the name boundaries within paths, so 'ObjectMeta.Name' does not
+// match 'ObjectMeta.Namespace' for example.
+func hasAnyPathPrefix(path diffPath, prefixes [][]string) bool {
+	for _, prefix := range prefixes {
+		if hasPathPrefix(path.types, prefix) || hasPathPrefix(path.json, prefix) {
+			return true
+		}
+	}
+	return false
+}
+
+// EqualObjectOption describes an Option that can be applied to a Matcher.
+type EqualObjectOption interface {
+	// ApplyToEqualObjectMatcher applies this configuration to the given MatchOption.
+	ApplyToEqualObjectMatcher(options *EqualObjectOptions)
+}
+
+// EqualObjectOptions holds the available types of EqualObjectOptions that can be applied to a Matcher.
+type EqualObjectOptions struct {
+	ignorePaths [][]string
+	matchPaths  [][]string
+}
+
+// ApplyOptions adds the passed MatchOptions to the MatchOptions struct.
+func (o *EqualObjectOptions) ApplyOptions(opts []EqualObjectOption) *EqualObjectOptions {
+	for _, opt := range opts {
+		opt.ApplyToEqualObjectMatcher(o)
+	}
+	return o
+}
+
+// IgnorePaths instructs the Matcher to ignore given paths when computing a diff.
+// Paths are written in a syntax similar to Go with a few special cases. Both types and
+// json/yaml field names are supported.
+//
+// Regular Paths:
+// * "ObjectMeta.Name"
+// * "metadata.name"
+// Arrays:
+// * "metadata.ownerReferences[0].name"
+// Maps, if they do not contain any of .[]/\:
+// * "metadata.labels.something"
+// Maps, if they contain any of .[]/\:
+// * "metadata.labels[kubernetes.io/something]"
+type IgnorePaths []string
+
+// ApplyToEqualObjectMatcher applies this configuration to the given MatchOptions.
+func (i IgnorePaths) ApplyToEqualObjectMatcher(opts *EqualObjectOptions) {
+	for _, p := range i {
+		opts.ignorePaths = append(opts.ignorePaths, strings.Split(p, "."))
+	}
+}
+
+// MatchPaths instructs the Matcher to restrict its diff to the given paths. If empty the Matcher will look at all paths.
+// Paths are written in a syntax similar to Go with a few special cases. Both types and
+// json/yaml field names are supported.
+//
+// Regular Paths:
+// * "ObjectMeta.Name"
+// * "metadata.name"
+// Arrays:
+// * "metadata.ownerReferences[0].name"
+// Maps, if they do not contain any of .[]/\:
+// * "metadata.labels.something"
+// Maps, if they contain any of .[]/\:
+// * "metadata.labels[kubernetes.io/something]"
+type MatchPaths []string
+
+// ApplyToEqualObjectMatcher applies this configuration to the given MatchOptions.
+func (i MatchPaths) ApplyToEqualObjectMatcher(opts *EqualObjectOptions) {
+	for _, p := range i {
+		opts.matchPaths = append(opts.matchPaths, strings.Split(p, "."))
+	}
+}
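
A short sketch of how the EqualObject matcher and its IgnorePaths/MatchPaths options are typically consumed; the ConfigMap and the server-populated field values are illustrative, and the assertions assume the gomega dot-import shown:

package apis_test

import (
	"testing"

	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/envtest/komega"
)

// TestEqualObjectSketch uses an illustrative ConfigMap; the "fetched" copy simulates
// server-populated metadata that should not affect the comparison.
func TestEqualObjectSketch(t *testing.T) {
	g := NewWithT(t)

	original := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "cfg", Namespace: "default"},
		Data:       map[string]string{"key": "value"},
	}
	fetched := original.DeepCopy()
	fetched.ResourceVersion = "42"
	fetched.UID = "e5a0cc1e-0000-0000-0000-000000000000"

	// Ignore metadata the API server fills in on its own.
	g.Expect(fetched).To(komega.EqualObject(original, komega.IgnoreAutogeneratedMetadata))

	// Alternatively, restrict the diff to a single path.
	g.Expect(fetched).To(komega.EqualObject(original, komega.MatchPaths{"ObjectMeta.Name"}))
}
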
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/interfaces.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/interfaces.go
new file mode 100644
index 00000000..b412e5c1
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/interfaces.go
@@ -0,0 +1,76 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package komega
+
+import (
+	"context"
+
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// Komega is a collection of utilities for writing tests involving a mocked
+// Kubernetes API.
+type Komega interface {
+	// Get returns a function that fetches a resource and returns the occurring error.
+	// It can be used with gomega.Eventually() like this
+	//   deployment := appsv1.Deployment{ ... }
+	//   gomega.Eventually(k.Get(&deployment)).To(gomega.Succeed())
+	// By calling the returned function directly it can also be used with gomega.Expect(k.Get(...)()).To(...)
+	Get(client.Object) func() error
+
+	// List returns a function that lists resources and returns the occurring error.
+	// It can be used with gomega.Eventually() like this
+	//   deployments := v1.DeploymentList{ ... }
+	//   gomega.Eventually(k.List(&deployments)).To(gomega.Succeed())
+	// By calling the returned function directly it can also be used as gomega.Expect(k.List(...)()).To(...)
+	List(client.ObjectList, ...client.ListOption) func() error
+
+	// Update returns a function that fetches a resource, applies the provided update function and then updates the resource.
+	// It can be used with gomega.Eventually() like this:
+	//   deployment := appsv1.Deployment{ ... }
+	//   gomega.Eventually(k.Update(&deployment, func() {
+	//     deployment.Spec.Replicas = 3
+	//   })).To(gomega.Succeed())
+	// By calling the returned function directly it can also be used as gomega.Expect(k.Update(...)()).To(...)
+	Update(client.Object, func(), ...client.UpdateOption) func() error
+
+	// UpdateStatus returns a function that fetches a resource, applies the provided update function and then updates the resource's status.
+	// It can be used with gomega.Eventually() like this:
+	//   deployment := appsv1.Deployment{ ... }
+	//   gomega.Eventually(k.Update(&deployment, func() {
+	//     deployment.Status.AvailableReplicas = 1
+	//   })).To(gomega.Succeed())
+	// By calling the returned function directly it can also be used as gomega.Expect(k.UpdateStatus(...)()).To(...)
+	UpdateStatus(client.Object, func(), ...client.SubResourceUpdateOption) func() error
+
+	// Object returns a function that fetches a resource and returns the object.
+	// It can be used with gomega.Eventually() like this:
+	//   deployment := appsv1.Deployment{ ... }
+	//   gomega.Eventually(k.Object(&deployment)).To(HaveField("Spec.Replicas", gomega.Equal(ptr.To(int32(3)))))
+	// By calling the returned function directly it can also be used as gomega.Expect(k.Object(...)()).To(...)
+	Object(client.Object) func() (client.Object, error)
+
+	// ObjectList returns a function that fetches a resource and returns the object.
+	// It can be used with gomega.Eventually() like this:
+	//   deployments := appsv1.DeploymentList{ ... }
+	//   gomega.Eventually(k.ObjectList(&deployments)).To(HaveField("Items", HaveLen(1)))
+	// By calling the returned function directly it can also be used as gomega.Expect(k.ObjectList(...)()).To(...)
+	ObjectList(client.ObjectList, ...client.ListOption) func() (client.ObjectList, error)
+
+	// WithContext returns a copy that uses the given context.
+	WithContext(context.Context) Komega
+}
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/komega.go b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/komega.go
new file mode 100644
index 00000000..e19d9b5f
--- /dev/null
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/envtest/komega/komega.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2021 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package komega
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/types"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// komega is a collection of utilities for writing tests involving a mocked
+// Kubernetes API.
+type komega struct {
+	ctx    context.Context
+	client client.Client
+}
+
+var _ Komega = &komega{}
+
+// New creates a new Komega instance with the given client.
+func New(c client.Client) Komega {
+	return &komega{
+		client: c,
+		ctx:    context.Background(),
+	}
+}
+
+// WithContext returns a copy that uses the given context.
+func (k komega) WithContext(ctx context.Context) Komega {
+	k.ctx = ctx
+	return &k
+}
+
+// Get returns a function that fetches a resource and returns the occurring error.
+func (k *komega) Get(obj client.Object) func() error {
+	key := types.NamespacedName{
+		Name:      obj.GetName(),
+		Namespace: obj.GetNamespace(),
+	}
+	return func() error {
+		return k.client.Get(k.ctx, key, obj)
+	}
+}
+
+// List returns a function that lists resources and returns the occurring error.
+func (k *komega) List(obj client.ObjectList, opts ...client.ListOption) func() error {
+	return func() error {
+		return k.client.List(k.ctx, obj, opts...)
+	}
+}
+
+// Update returns a function that fetches a resource, applies the provided update function and then updates the resource.
+func (k *komega) Update(obj client.Object, updateFunc func(), opts ...client.UpdateOption) func() error {
+	key := types.NamespacedName{
+		Name:      obj.GetName(),
+		Namespace: obj.GetNamespace(),
+	}
+	return func() error {
+		err := k.client.Get(k.ctx, key, obj)
+		if err != nil {
+			return err
+		}
+		updateFunc()
+		return k.client.Update(k.ctx, obj, opts...)
+	}
+}
+
+// UpdateStatus returns a function that fetches a resource, applies the provided update function and then updates the resource's status.
+func (k *komega) UpdateStatus(obj client.Object, updateFunc func(), opts ...client.SubResourceUpdateOption) func() error {
+	key := types.NamespacedName{
+		Name:      obj.GetName(),
+		Namespace: obj.GetNamespace(),
+	}
+	return func() error {
+		err := k.client.Get(k.ctx, key, obj)
+		if err != nil {
+			return err
+		}
+		updateFunc()
+		return k.client.Status().Update(k.ctx, obj, opts...)
+	}
+}
+
+// Object returns a function that fetches a resource and returns the object.
+func (k *komega) Object(obj client.Object) func() (client.Object, error) {
+	key := types.NamespacedName{
+		Name:      obj.GetName(),
+		Namespace: obj.GetNamespace(),
+	}
+	return func() (client.Object, error) {
+		err := k.client.Get(k.ctx, key, obj)
+		return obj, err
+	}
+}
+
+// ObjectList returns a function that fetches a resource and returns the object.
+func (k *komega) ObjectList(obj client.ObjectList, opts ...client.ListOption) func() (client.ObjectList, error) {
+	return func() (client.ObjectList, error) {
+		err := k.client.List(k.ctx, obj, opts...)
+		return obj, err
+	}
+}
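
A brief sketch of the instance-based API implemented above (New, WithContext, Update, Object), again assuming a fake client and illustrative object names:

package apis_test

import (
	"context"
	"testing"

	. "github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	"sigs.k8s.io/controller-runtime/pkg/envtest/komega"
)

// TestKomegaInstance is illustrative; a real suite would bind the envtest client.
func TestKomegaInstance(t *testing.T) {
	g := NewWithT(t)

	c := fake.NewClientBuilder().Build()
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cfg", Namespace: "default"}}
	g.Expect(c.Create(context.Background(), cm)).To(Succeed())

	// Bind a Komega to this client and a per-test context.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	k := komega.New(c).WithContext(ctx)

	// Update re-fetches the object, applies the mutation, then writes it back,
	// so wrapping it in Eventually retries transient conflicts.
	g.Eventually(k.Update(cm, func() {
		if cm.Data == nil {
			cm.Data = map[string]string{}
		}
		cm.Data["key"] = "value"
	})).Should(Succeed())

	g.Eventually(k.Object(cm)).Should(HaveField("Data", HaveKeyWithValue("key", "value")))
}
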
diff --git a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go
index 2f7820d0..ef9c4562 100644
--- a/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go
+++ b/vendor/sigs.k8s.io/controller-runtime/pkg/webhook/admission/multi.go
@@ -31,6 +31,7 @@ type multiMutating []Handler
 
 func (hs multiMutating) Handle(ctx context.Context, req Request) Response {
 	patches := []jsonpatch.JsonPatchOperation{}
+	warnings := []string{}
 	for _, handler := range hs {
 		resp := handler.Handle(ctx, req)
 		if !resp.Allowed {
@@ -42,6 +43,7 @@ func (hs multiMutating) Handle(ctx context.Context, req Request) Response {
 					resp.PatchType, admissionv1.PatchTypeJSONPatch))
 		}
 		patches = append(patches, resp.Patches...)
+		warnings = append(warnings, resp.Warnings...)
 	}
 	var err error
 	marshaledPatch, err := json.Marshal(patches)
@@ -55,6 +57,7 @@ func (hs multiMutating) Handle(ctx context.Context, req Request) Response {
 				Code: http.StatusOK,
 			},
 			Patch:     marshaledPatch,
+			Warnings:  warnings,
 			PatchType: func() *admissionv1.PatchType { pt := admissionv1.PatchTypeJSONPatch; return &pt }(),
 		},
 	}
@@ -71,11 +74,13 @@ func MultiMutatingHandler(handlers ...Handler) Handler {
 type multiValidating []Handler
 
 func (hs multiValidating) Handle(ctx context.Context, req Request) Response {
+	warnings := []string{}
 	for _, handler := range hs {
 		resp := handler.Handle(ctx, req)
 		if !resp.Allowed {
 			return resp
 		}
+		warnings = append(warnings, resp.Warnings...)
 	}
 	return Response{
 		AdmissionResponse: admissionv1.AdmissionResponse{
@@ -83,6 +88,7 @@ func (hs multiValidating) Handle(ctx context.Context, req Request) Response {
 			Result: &metav1.Status{
 				Code: http.StatusOK,
 			},
+			Warnings: warnings,
 		},
 	}
 }
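
To illustrate the effect of the warning aggregation added above, a hedged sketch with hypothetical handlers that always admit but attach a warning; with this change both warnings survive into the merged response:

package admission_test

import (
	"context"
	"fmt"

	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// warnOnly is a hypothetical validating handler: it always admits the request
// but attaches a warning to its response.
type warnOnly struct{ msg string }

func (w warnOnly) Handle(_ context.Context, _ admission.Request) admission.Response {
	resp := admission.Allowed("")
	resp.Warnings = []string{w.msg}
	return resp
}

// Example_multiValidatingWarnings shows that warnings from every inner handler
// now end up in the merged response.
func Example_multiValidatingWarnings() {
	h := admission.MultiValidatingHandler(warnOnly{"first"}, warnOnly{"second"})
	resp := h.Handle(context.Background(), admission.Request{})
	fmt.Println(resp.Allowed, resp.Warnings)
	// Output: true [first second]
}
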

From f52c2a499f96f0f42af0b6ce543eb1a15efa4af1 Mon Sep 17 00:00:00 2001
From: Bharath B 
Date: Fri, 26 Sep 2025 11:54:19 +0530
Subject: [PATCH 2/7] ESO-101: Adds API integration test suite

Signed-off-by: Bharath B 
---
 Makefile                                      |  33 +-
 .../externalsecretsconfig.testsuite.yaml      | 683 ++++++++++++++++++
 .../externalsecretsmanager.testsuite.yaml     | 382 ++++++++++
 hack/go-fips.sh                               |   0
 hack/test-apis.sh                             |  33 +
 test/apis/README.md                           |   1 +
 test/apis/generator.go                        | 586 +++++++++++++++
 test/apis/suite_test.go                       |  86 +++
 test/apis/vars.go                             | 143 ++++
 tools/tools.go                                |   1 +
 10 files changed, 1942 insertions(+), 6 deletions(-)
 create mode 100644 api/v1alpha1/tests/externalsecretsconfig.operator.openshift.io/externalsecretsconfig.testsuite.yaml
 create mode 100644 api/v1alpha1/tests/externalsecretsmanager.operator.openshift.io/externalsecretsmanager.testsuite.yaml
 mode change 100644 => 100755 hack/go-fips.sh
 create mode 100755 hack/test-apis.sh
 create mode 100644 test/apis/README.md
 create mode 100644 test/apis/generator.go
 create mode 100644 test/apis/suite_test.go
 create mode 100644 test/apis/vars.go

diff --git a/Makefile b/Makefile
index 1357b5ef..f490dd68 100644
--- a/Makefile
+++ b/Makefile
@@ -55,7 +55,7 @@ OPERATOR_SDK_VERSION ?= v1.39.0
 # Image URL to use all building/pushing image targets
 IMG ?= openshift.io/external-secrets-operator:latest
 # ENVTEST_K8S_VERSION refers to the version of kubebuilder assets to be downloaded by envtest binary.
-ENVTEST_K8S_VERSION = 1.31.0
+ENVTEST_K8S_VERSION = 1.32.0
 
 # Get the currently used golang install path (in GOPATH/bin, unless GOBIN is set)
 ifeq (,$(shell go env GOBIN))
@@ -127,8 +127,11 @@ vet: ## Run go vet against code.
 	go vet ./...
 
 .PHONY: test
-test: manifests generate fmt vet envtest ## Run tests.
-	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test $$(go list ./... | grep -v /e2e) -coverprofile cover.out
+test: manifests generate fmt vet envtest test-apis test-unit ## Run tests.
+
+.PHONY: test-unit
+test-unit: vet ## Run unit tests.
+	go test $$(go list ./... | grep -vE 'test/(e2e|apis|utils)') -coverprofile cover.out
 
 update-operand-manifests: helm yq
 	hack/update-external-secrets-manifests.sh $(EXTERNAL_SECRETS_VERSION)
@@ -250,6 +253,7 @@ YQ = $(LOCALBIN)/yq
 HELM ?= $(LOCALBIN)/helm
 REFERENCE_DOC_GENERATOR ?= $(LOCALBIN)/crd-ref-docs
 GOVULNCHECK ?= $(LOCALBIN)/govulncheck
+GINKGO ?= $(LOCALBIN)/ginkgo
 
 ## Tool Versions
 YQ_VERSION = v4.45.2
@@ -283,11 +287,15 @@ crd-ref-docs: $(LOCALBIN) ## Download crd-ref-docs locally if necessary.
 govulncheck: $(LOCALBIN) ## Download govulncheck locally if necessary.
 	$(call go-install-tool,$(GOVULNCHECK),golang.org/x/vuln/cmd/govulncheck)
 
+.PHONY: ginkgo
+ginkgo: $(LOCALBIN) ## Download ginkgo locally if necessary.
+	$(call go-install-tool,$(GINKGO),github.com/onsi/ginkgo/v2/ginkgo)
+
 # go-install-tool will 'go install' any package with custom target and name of binary, if it doesn't exist
 # $1 - target path with name of binary
 # $2 - package url which can be installed
 define go-install-tool
-@[ -f "$(1)" ] || { \
+@{ \
 set -e; \
 package=$(2) ;\
 echo "Downloading $${package}" ;\
@@ -409,9 +417,22 @@ docs: crd-ref-docs
 
 ## perform vulnerabilities scan using govulncheck.
 .PHONY: govulnscan
-#GO-2025-3547 and GO-2025-3521 containing code is not directly used in the operator, hence will be ignored.
-KNOWN_VULNERABILITIES:="GO-2025-3547|GO-2025-3521"
+# The ignored vulnerabilities are not in the operator code, but in the vendored packages:
+# - https://pkg.go.dev/vuln/GO-2025-3956
+# - https://pkg.go.dev/vuln/GO-2025-3915
+# - https://pkg.go.dev/vuln/GO-2025-3547
+# - https://pkg.go.dev/vuln/GO-2025-3521
+KNOWN_VULNERABILITIES:="GO-2025-3547|GO-2025-3521|GO-2025-3956|GO-2025-3915"
 govulnscan: govulncheck $(OUTPUTS_PATH)  ## Run govulncheck
 	- $(GOVULNCHECK) ./... > $(OUTPUTS_PATH)/govulcheck.results 2>&1
 	$(eval reported_vulnerabilities = $(strip $(shell grep "pkg.go.dev" $(OUTPUTS_PATH)/govulcheck.results | ([ -n $KNOWN_VULNERABILITIES ] && grep -Ev $(KNOWN_VULNERABILITIES) || cat) | wc -l)))
 	@(if [ $(reported_vulnerabilities) -ne 0 ]; then echo -e "\n-- ERROR -- $(reported_vulnerabilities) new vulnerabilities reported, please check\n"; exit 1; fi)
+
+# Utilize the controller-runtime provided envtest for API integration tests.
+.PHONY: test-apis
+test-apis: envtest ginkgo ## Run only the API integration tests.
+	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" ./hack/test-apis.sh
+
+.PHONY: clean
+clean:
+	rm -rf $(LOCALBIN) $(OUTPUTS_PATH) cover.out dist
\ No newline at end of file
diff --git a/api/v1alpha1/tests/externalsecretsconfig.operator.openshift.io/externalsecretsconfig.testsuite.yaml b/api/v1alpha1/tests/externalsecretsconfig.operator.openshift.io/externalsecretsconfig.testsuite.yaml
new file mode 100644
index 00000000..de1ca3a5
--- /dev/null
+++ b/api/v1alpha1/tests/externalsecretsconfig.operator.openshift.io/externalsecretsconfig.testsuite.yaml
@@ -0,0 +1,683 @@
+apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
+name: "ExternalSecretsConfig"
+crdName: externalsecretsconfigs.operator.openshift.io
+tests:
+  onCreate:
+    - name: Should be able to create a minimal ExternalSecretsConfig instance
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec: {} # No spec is required for an ExternalSecretsConfig
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec: {}
+    - name: Should be able to create ExternalSecretsConfig with ControllerConfig spec
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            labels:
+              "app": "external-secrets"
+            periodicReconcileInterval: 600
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            labels:
+              "app": "external-secrets"
+            periodicReconcileInterval: 600
+    - name: Should be able to create ExternalSecretsConfig with ApplicationConfig spec
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig:
+            operatingNamespace: "test-namespace"
+            logLevel: 3
+            webhookConfig:
+              certificateCheckInterval: "10m"
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+              secretRef:
+                name: "bitwarden-tls-certs"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig:
+            operatingNamespace: "test-namespace"
+            logLevel: 3
+            webhookConfig:
+              certificateCheckInterval: "10m"
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+              secretRef:
+                name: "bitwarden-tls-certs"
+    - name: Should be able to create ExternalSecretsConfig with cert-manager configuration
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: "letsencrypt-issuer"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+                injectAnnotations: "true"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 300
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: "letsencrypt-issuer"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+                injectAnnotations: "true"
+                certificateDuration: "8760h"
+                certificateRenewBefore: "30m"
+    - name: Should fail to create with invalid singleton name
+      resourceName: not-cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec: {}
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"not-cluster\" is invalid: : Invalid value: \"object\": ExternalSecretsConfig is a singleton, .metadata.name must be 'cluster'"
+    - name: Should fail to create cert-manager enabled without issuerRef
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.controllerConfig.certProvider.certManager: Invalid value: \"object\": issuerRef must be provided when mode is set to Enabled."
+    - name: Should fail with injectAnnotations without cert-manager enabled
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Disabled
+                injectAnnotations: "true"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.controllerConfig.certProvider.certManager: Invalid value: \"object\": injectAnnotations can only be set when mode is set to Enabled."
+    - name: Should fail with injectAnnotations true when mode is Disabled explicitly
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Disabled
+                injectAnnotations: "true"
+                issuerRef:
+                  name: "test-issuer"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.controllerConfig.certProvider.certManager: Invalid value: \"object\": injectAnnotations can only be set when mode is set to Enabled."
+    - name: Should allow injectAnnotations false when mode is Disabled
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Disabled
+                injectAnnotations: "false"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 300
+            certProvider:
+              certManager:
+                mode: Disabled
+                injectAnnotations: "false"
+                certificateDuration: "8760h"
+                certificateRenewBefore: "30m"
+    - name: Should allow omitting injectAnnotations when mode is Disabled
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Disabled
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 300
+            certProvider:
+              certManager:
+                mode: Disabled
+                certificateDuration: "8760h"
+                certificateRenewBefore: "30m"
+                injectAnnotations: "false"
+    - name: Should fail with invalid issuer kind
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: "test-issuer"
+                  kind: "InvalidKind"
+                  group: "cert-manager.io"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.controllerConfig.certProvider.certManager.issuerRef: Invalid value: \"object\": kind must be either 'Issuer' or 'ClusterIssuer'"
+    - name: Should fail with invalid issuer group
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: "test-issuer"
+                  kind: "ClusterIssuer"
+                  group: "invalid-group"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.controllerConfig.certProvider.certManager.issuerRef: Invalid value: \"object\": group must be 'cert-manager.io'"
+    - name: Should fail with operatingNamespace too short
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig:
+            operatingNamespace: ""
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.appConfig.operatingNamespace: Invalid value: \"\": spec.appConfig.operatingNamespace in body should be at least 1 chars long"
+    - name: Should fail with operatingNamespace too long
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig:
+            operatingNamespace: "this-namespace-name-is-way-too-long-and-exceeds-the-maximum-allowed-length-of-sixty-three-characters-total"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: [spec.appConfig.operatingNamespace: Too long: may not be more than 63 bytes, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]"
+    - name: Should fail with periodicReconcileInterval too low
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 100
+      expectedError: "spec.controllerConfig.periodicReconcileInterval: Invalid value: 100: spec.controllerConfig.periodicReconcileInterval in body should be greater than or equal to 120"
+    - name: Should fail with periodicReconcileInterval too high
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 20000
+      expectedError: "spec.controllerConfig.periodicReconcileInterval: Invalid value: 20000: spec.controllerConfig.periodicReconcileInterval in body should be less than or equal to 18000"
+    - name: Should fail with too many controller labels
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            labels:
+              "label-key-0": "label-value-0"
+              "label-key-1": "label-value-1"
+              "label-key-2": "label-value-2"
+              "label-key-3": "label-value-3"
+              "label-key-4": "label-value-4"
+              "label-key-5": "label-value-5"
+              "label-key-6": "label-value-6"
+              "label-key-7": "label-value-7"
+              "label-key-8": "label-value-8"
+              "label-key-9": "label-value-9"
+              "label-key-10": "label-value-10"
+              "label-key-11": "label-value-11"
+              "label-key-12": "label-value-12"
+              "label-key-13": "label-value-13"
+              "label-key-14": "label-value-14"
+              "label-key-15": "label-value-15"
+              "label-key-16": "label-value-16"
+              "label-key-17": "label-value-17"
+              "label-key-18": "label-value-18"
+              "label-key-19": "label-value-19"
+              "label-key-20": "label-value-20"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: [spec.controllerConfig.labels: Too many: 21: must have at most 20 items, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]"
+    - name: Should fail with issuerRef name too long
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: "this-issuer-name-is-extremely-long-and-exceeds-the-kubernetes-maximum-name-length-limit-of-two-hundred-fifty-three-characters-which-is-quite-a-lot-of-characters-but-we-need-to-test-this-validation-constraint-properly-to-ensure-it-works-as-expected-in-all-scenarios"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: [spec.controllerConfig.certProvider.certManager.issuerRef.name: Too long: may not be more than 253 bytes, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]"
+    - name: Should fail with issuerRef name empty (required field)
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: ""
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.controllerConfig.certProvider.certManager.issuerRef.name: Invalid value: \"\": spec.controllerConfig.certProvider.certManager.issuerRef.name in body should be at least 1 chars long"
+    - name: Should fail with issuerRef kind too long
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: "test-issuer"
+                  kind: "this-kind-name-is-extremely-long-and-exceeds-the-kubernetes-maximum-name-length-limit-of-two-hundred-fifty-three-characters-which-is-quite-a-lot-of-characters-but-we-need-to-test-this-validation-constraint-properly-to-ensure-it-works-as-expected-in-all-scenarios"
+                  group: "cert-manager.io"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: [spec.controllerConfig.certProvider.certManager.issuerRef.kind: Too long: may not be more than 253 bytes, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]"
+    - name: Should fail with issuerRef group too long
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: "test-issuer"
+                  kind: "ClusterIssuer"
+                  group: "this-group-name-is-extremely-long-and-exceeds-the-kubernetes-maximum-name-length-limit-of-two-hundred-fifty-three-characters-which-is-quite-a-lot-of-characters-but-we-need-to-test-this-validation-constraint-properly-to-ensure-it-works-as-expected-in-all-scenarios"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: [spec.controllerConfig.certProvider.certManager.issuerRef.group: Too long: may not be more than 253 bytes, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]"
+    - name: Should fail with bitwarden secretRef name empty (required field)
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+              secretRef:
+                name: ""
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.plugins.bitwardenSecretManagerProvider.secretRef.name: Invalid value: \"\": spec.plugins.bitwardenSecretManagerProvider.secretRef.name in body should be at least 1 chars long"
+    - name: Should fail with bitwarden secretRef name too long
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+              secretRef:
+                name: "this-secret-name-is-extremely-long-and-exceeds-the-kubernetes-maximum-name-length-limit-of-two-hundred-fifty-three-characters-which-is-quite-a-lot-of-characters-but-we-need-to-test-this-validation-constraint-properly-to-ensure-it-works-as-expected-in-all-scenarios"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: [spec.plugins.bitwardenSecretManagerProvider.secretRef.name: Too long: may not be more than 253 bytes, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]"
+    - name: Should fail with invalid log level too low
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig:
+            logLevel: 0
+      expectedError: "spec.appConfig.logLevel: Invalid value: 0: spec.appConfig.logLevel in body should be greater than or equal to 1"
+    - name: Should fail with invalid log level too high
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig:
+            logLevel: 10
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.appConfig.logLevel: Invalid value: 10: spec.appConfig.logLevel in body should be less than or equal to 5"
+    - name: Should fail with bitwarden enabled but no secretRef or cert-manager
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec: Invalid value: \"object\": secretRef or certManager must be configured when bitwardenSecretManagerProvider plugin is enabled"
+    - name: Should fail with bitwarden enabled and cert-manager disabled without secretRef
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Disabled
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec: Invalid value: \"object\": secretRef or certManager must be configured when bitwardenSecretManagerProvider plugin is enabled"
+    - name: Should allow bitwarden enabled with both secretRef and cert-manager enabled
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+              secretRef:
+                name: "bitwarden-tls-certs"
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                injectAnnotations: "false"
+                issuerRef:
+                  name: "letsencrypt-issuer"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+              secretRef:
+                name: "bitwarden-tls-certs"
+          controllerConfig:
+            periodicReconcileInterval: 300
+            certProvider:
+              certManager:
+                mode: Enabled
+                injectAnnotations: "false"
+                issuerRef:
+                  name: "letsencrypt-issuer"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+                certificateDuration: "8760h"
+                certificateRenewBefore: "30m"
+    - name: Should allow bitwarden enabled with cert-manager enabled but no secretRef
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                injectAnnotations: "false"
+                issuerRef:
+                  name: "letsencrypt-issuer"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+          controllerConfig:
+            periodicReconcileInterval: 300
+            certProvider:
+              certManager:
+                mode: Enabled
+                injectAnnotations: "false"
+                issuerRef:
+                  name: "letsencrypt-issuer"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+                certificateDuration: "8760h"
+                certificateRenewBefore: "30m"
+    - name: Should allow bitwarden disabled without secretRef or cert-manager
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Disabled
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Disabled
+    - name: Should allow valid periodicReconcileInterval values
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 1800
+            labels:
+              "environment": "test"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 1800
+            labels:
+              "environment": "test"
+    - name: Should allow webhookConfig with custom certificateCheckInterval
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig:
+            webhookConfig:
+              certificateCheckInterval: "15m"
+            operatingNamespace: "test-ns"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig:
+            logLevel: 1
+            webhookConfig:
+              certificateCheckInterval: "15m"
+            operatingNamespace: "test-ns"
+    - name: Should allow minimal controllerConfig with just periodicReconcileInterval
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 120
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 120
+  onUpdate:
+    - name: Should be able to update labels in controller config
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            labels:
+              "app": "external-secrets"
+      updated: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            labels:
+              "app": "external-secrets"
+              "version": "v1.0.0"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 300
+            labels:
+              "app": "external-secrets"
+              "version": "v1.0.0"
+    - name: Should not be able to change cert-manager enabled after creation
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Disabled
+      updated: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: "letsencrypt-issuer"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.controllerConfig.certProvider.certManager.mode: Invalid value: \"string\": mode is immutable once set"
+    - name: Should not be able to change issuerRef after creation
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: "old-issuer"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+      updated: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            certProvider:
+              certManager:
+                mode: Enabled
+                issuerRef:
+                  name: "new-issuer"
+                  kind: "ClusterIssuer"
+                  group: "cert-manager.io"
+      expectedError: "ExternalSecretsConfig.operator.openshift.io \"cluster\" is invalid: spec.controllerConfig.certProvider.certManager.issuerRef: Invalid value: \"object\": issuerRef is immutable once set"
+    - name: Should be able to update periodicReconcileInterval
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 300
+            labels:
+              "env": "dev"
+      updated: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 600
+            labels:
+              "env": "dev"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          controllerConfig:
+            periodicReconcileInterval: 600
+            labels:
+              "env": "dev"
+    - name: Should be able to add bitwarden provider after creation
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig: {}
+      updated: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig: {}
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+              secretRef:
+                name: "bitwarden-certs"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsConfig
+        spec:
+          appConfig:
+            logLevel: 1
+          plugins:
+            bitwardenSecretManagerProvider:
+              mode: Enabled
+              secretRef:
+                name: "bitwarden-certs"
\ No newline at end of file
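
The suite above is declarative: each onCreate entry applies the `initial` manifest and asserts either the `expected` object (after API-server defaulting) or the `expectedError` string; onUpdate entries do the same across an update. A hedged sketch of how a runner could evaluate one onCreate case against an envtest-backed client is shown below; the struct and helper names (onCreateTest, runOnCreate) are hypothetical and not part of this patch.

    package tests

    import (
        "context"

        "github.com/ghodss/yaml"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
        "sigs.k8s.io/controller-runtime/pkg/client"
    )

    // onCreateTest mirrors a single entry under `tests.onCreate` in the suite
    // files; the field names follow the YAML keys, the type itself is illustrative.
    type onCreateTest struct {
        Name          string `json:"name"`
        ResourceName  string `json:"resourceName"`
        Initial       string `json:"initial"`
        Expected      string `json:"expected"`
        ExpectedError string `json:"expectedError"`
    }

    // runOnCreate applies the initial manifest and returns the created object
    // (with server-side defaults applied) or the validation error, whichever occurs.
    func runOnCreate(ctx context.Context, c client.Client, tc onCreateTest) (*unstructured.Unstructured, error) {
        obj := &unstructured.Unstructured{}
        if err := yaml.Unmarshal([]byte(tc.Initial), &obj.Object); err != nil {
            return nil, err
        }
        obj.SetName(tc.ResourceName)

        if err := c.Create(ctx, obj); err != nil {
            // Callers compare err.Error() against tc.ExpectedError.
            return nil, err
        }
        // Create updates obj in place; callers compare its spec against tc.Expected.
        return obj, nil
    }
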
diff --git a/api/v1alpha1/tests/externalsecretsmanager.operator.openshift.io/externalsecretsmanager.testsuite.yaml b/api/v1alpha1/tests/externalsecretsmanager.operator.openshift.io/externalsecretsmanager.testsuite.yaml
new file mode 100644
index 00000000..1e6715f9
--- /dev/null
+++ b/api/v1alpha1/tests/externalsecretsmanager.operator.openshift.io/externalsecretsmanager.testsuite.yaml
@@ -0,0 +1,382 @@
+apiVersion: apiextensions.k8s.io/v1 # Hack because controller-gen complains if we don't have this
+name: "ExternalSecretsManager"
+crdName: externalsecretsmanagers.operator.openshift.io
+tests:
+  onCreate:
+    - name: Should be able to create a minimal ExternalSecretsManager instance
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec: {} # No spec is required for an ExternalSecretsManager
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec: {}
+    - name: Should be able to create ExternalSecretsManager with GlobalConfig
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            labels:
+              "managed-by": "external-secrets-operator"
+              "environment": "production"
+            logLevel: 3
+            resources:
+              requests:
+                cpu: "100m"
+                memory: "128Mi"
+              limits:
+                cpu: "500m"
+                memory: "512Mi"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            labels:
+              "managed-by": "external-secrets-operator"
+              "environment": "production"
+            logLevel: 3
+            resources:
+              requests:
+                cpu: "100m"
+                memory: "128Mi"
+              limits:
+                cpu: "500m"
+                memory: "512Mi"
+    - name: Should be able to create ExternalSecretsManager with complete configuration
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            labels:
+              "team": "platform"
+              "component": "external-secrets"
+            logLevel: 4
+            nodeSelector:
+              "node-type": "worker"
+              "kubernetes.io/arch": "amd64"
+            tolerations:
+              - key: "node-role.kubernetes.io/master"
+                operator: "Exists"
+                effect: "NoSchedule"
+            proxy:
+              httpProxy: "http://proxy.example.com:8080"
+              httpsProxy: "https://proxy.example.com:8443"
+              noProxy: "localhost,127.0.0.1,.local"
+          optionalFeatures:
+            - name: ""
+              mode: Disabled
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            labels:
+              "team": "platform"
+              "component": "external-secrets"
+            logLevel: 4
+            nodeSelector:
+              "node-type": "worker"
+              "kubernetes.io/arch": "amd64"
+            tolerations:
+              - key: "node-role.kubernetes.io/master"
+                operator: "Exists"
+                effect: "NoSchedule"
+            proxy:
+              httpProxy: "http://proxy.example.com:8080"
+              httpsProxy: "https://proxy.example.com:8443"
+              noProxy: "localhost,127.0.0.1,.local"
+          optionalFeatures:
+            - name: ""
+              mode: Disabled
+    - name: Should fail to create with invalid singleton name
+      resourceName: invalid-name
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec: {}
+      expectedError: "ExternalSecretsManager.operator.openshift.io \"invalid-name\" is invalid: : Invalid value: \"object\": ExternalSecretsManager is a singleton, .metadata.name must be 'cluster'"
+    - name: Should fail with too many labels in global config
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            labels:
+              "label-key-0": "label-value-0"
+              "label-key-1": "label-value-1"
+              "label-key-2": "label-value-2"
+              "label-key-3": "label-value-3"
+              "label-key-4": "label-value-4"
+              "label-key-5": "label-value-5"
+              "label-key-6": "label-value-6"
+              "label-key-7": "label-value-7"
+              "label-key-8": "label-value-8"
+              "label-key-9": "label-value-9"
+              "label-key-10": "label-value-10"
+              "label-key-11": "label-value-11"
+              "label-key-12": "label-value-12"
+              "label-key-13": "label-value-13"
+              "label-key-14": "label-value-14"
+              "label-key-15": "label-value-15"
+              "label-key-16": "label-value-16"
+              "label-key-17": "label-value-17"
+              "label-key-18": "label-value-18"
+              "label-key-19": "label-value-19"
+              "label-key-20": "label-value-20"
+      expectedError: "ExternalSecretsManager.operator.openshift.io \"cluster\" is invalid: [spec.globalConfig.labels: Too many: 21: must have at most 20 items, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]"
+    - name: Should fail with invalid log level too low
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 0
+      expectedError: "ExternalSecretsManager.operator.openshift.io \"cluster\" is invalid: spec.globalConfig.logLevel: Invalid value: 0: spec.globalConfig.logLevel in body should be greater than or equal to 1"
+    - name: Should fail with HTTP proxy URL too long
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            proxy:
+              httpProxy: "http://extremely-long-proxy-url-that-definitely-exceeds-2048-characters-limit-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb.example.com:8080"
+      expectedError: "ExternalSecretsManager.operator.openshift.io \"cluster\" is invalid: [spec.globalConfig.proxy.httpProxy: Too long: may not be more than 2048 bytes, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]"
+    - name: Should fail with HTTPS proxy URL too long
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            proxy:
+              httpsProxy: "https://extremely-long-https-proxy-url-that-definitely-exceeds-2048-characters-limit-BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb.example.com:8443"
+      expectedError: "ExternalSecretsManager.operator.openshift.io \"cluster\" is invalid: [spec.globalConfig.proxy.httpsProxy: Too long: may not be more than 2048 bytes, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]"
+    - name: Should fail with no proxy list too long
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            proxy:
+              noProxy: "extremely-long-no-proxy-list-that-definitely-exceeds-4096-characters-limit-CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC
CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC,localhost,127.0.0.1,.local,.internal,.svc.cluster.local,10.0.0.0/8,192.168.0.0/16,172.16.0.0/12.example.com"
+      expectedError: "ExternalSecretsManager.operator.openshift.io \"cluster\" is invalid: [spec.globalConfig.proxy.noProxy: Too long: may not be more than 4096 bytes, : Invalid value: \"null\": some validation rules were not checked because the object was invalid; correct the existing errors to complete validation]"
+    - name: Should accept HTTP proxy URL at maximum length boundary
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            proxy:
+              httpProxy: "http://proxy-url-at-exactly-two-thousand-and-forty-eight-characters-to-test-the-boundary-condition-where-we-want-to-ensure-that-urls-at-the-maximum-allowed-length-are-accepted-properly-by-the-validation-system-while-urls-that-exceed-this-limit-are-rejected-appropriately-which-is-important-for-maintaining-proper-validation-boundaries-in-production-systems-where-configuration-parameters-must-be-validated-correctly-to-prevent-system-failures-or-unexpected-behavior-that-could-impact-application-functionality-and-user-experience-in-various-deployment-environments-including-development-staging-and-production-kubernetes-clusters-running-across-different-cloud-providers-and-on-premises-infrastructure-where-proxy-configurations-are-commonly-used-for-network-security-and-compliance-requirements-that-organizations-need-to-meet-for-their-business-operations-and-regulatory-obligations-in-different-geographical-regions-around-the-world-where-various-network-policies-and-security-measures-are-implemented-to-protect-sensitive-data-and-ensure-proper-access-control-for-applications-and-services-that-handle-confidential-information-and-business-critical-processes-that-must-operate-reliably-and-securely-at-all-times-without-interruption-or-performance-degradation-that-could-affect-end-users-and-customers-who-depend-on-these-systems-for-their-daily-activities-and-business-needs-which-makes-proper-validation-of-configuration-parameters-like-proxy-urls-essential-for-maintaining-system-stability-and-security-in-production-environments-where-any-configuration-error-could-have-significant-consequences-for-business-continuity-and-customer-satisfaction-which-is-why-we-implement-comprehensive-boundary-testing-to-ensure-that-all-validation-rules-work-correctly-at-their-specified-limits-and-provide-clear-error-messages-when-those-limits-are-exceeded-by-user-configurations.example.com:8080"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 1
+            proxy:
+              httpProxy: "http://proxy-url-at-exactly-two-thousand-and-forty-eight-characters-to-test-the-boundary-condition-where-we-want-to-ensure-that-urls-at-the-maximum-allowed-length-are-accepted-properly-by-the-validation-system-while-urls-that-exceed-this-limit-are-rejected-appropriately-which-is-important-for-maintaining-proper-validation-boundaries-in-production-systems-where-configuration-parameters-must-be-validated-correctly-to-prevent-system-failures-or-unexpected-behavior-that-could-impact-application-functionality-and-user-experience-in-various-deployment-environments-including-development-staging-and-production-kubernetes-clusters-running-across-different-cloud-providers-and-on-premises-infrastructure-where-proxy-configurations-are-commonly-used-for-network-security-and-compliance-requirements-that-organizations-need-to-meet-for-their-business-operations-and-regulatory-obligations-in-different-geographical-regions-around-the-world-where-various-network-policies-and-security-measures-are-implemented-to-protect-sensitive-data-and-ensure-proper-access-control-for-applications-and-services-that-handle-confidential-information-and-business-critical-processes-that-must-operate-reliably-and-securely-at-all-times-without-interruption-or-performance-degradation-that-could-affect-end-users-and-customers-who-depend-on-these-systems-for-their-daily-activities-and-business-needs-which-makes-proper-validation-of-configuration-parameters-like-proxy-urls-essential-for-maintaining-system-stability-and-security-in-production-environments-where-any-configuration-error-could-have-significant-consequences-for-business-continuity-and-customer-satisfaction-which-is-why-we-implement-comprehensive-boundary-testing-to-ensure-that-all-validation-rules-work-correctly-at-their-specified-limits-and-provide-clear-error-messages-when-those-limits-are-exceeded-by-user-configurations.example.com:8080"
+    - name: Should accept HTTPS proxy URL at maximum length boundary
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 1
+            proxy:
+              httpsProxy: "https://secure-proxy-url-at-exactly-two-thousand-and-forty-eight-characters-to-test-the-boundary-condition-where-we-want-to-ensure-that-urls-at-the-maximum-allowed-length-are-accepted-properly-by-the-validation-system-while-urls-that-exceed-this-limit-are-rejected-appropriately-which-is-important-for-maintaining-proper-validation-boundaries-in-production-systems-where-configuration-parameters-must-be-validated-correctly-to-prevent-system-failures-or-unexpected-behavior-that-could-impact-application-functionality-and-user-experience-in-various-deployment-environments-including-development-staging-and-production-kubernetes-clusters-running-across-different-cloud-providers-and-on-premises-infrastructure-where-proxy-configurations-are-commonly-used-for-network-security-and-compliance-requirements-that-organizations-need-to-meet-for-their-business-operations-and-regulatory-obligations-in-different-geographical-regions-around-the-world-where-various-network-policies-and-security-measures-are-implemented-to-protect-sensitive-data-and-ensure-proper-access-control-for-applications-and-services-that-handle-confidential-information-and-business-critical-processes-that-must-operate-reliably-and-securely-at-all-times-without-interruption-or-performance-degradation-that-could-affect-end-users-and-customers-who-depend-on-these-systems-for-their-daily-activities-and-business-needs-which-makes-proper-validation-of-configuration-parameters-like-proxy-urls-essential-for-maintaining-system-stability-and-security-in-production-environments-where-any-configuration-error-could-have-significant-consequences-for-business-continuity-and-customer-satisfaction-which-is-why-we-implement-comprehensive-boundary-testing-to-ensure-that-all-validation-rules-work-correctly-at-their-specified-limits-and-provide-clear-error-messages-when-those-limits-are-exceeded.example.com:8443"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 1
+            proxy:
+              httpsProxy: "https://secure-proxy-url-at-exactly-two-thousand-and-forty-eight-characters-to-test-the-boundary-condition-where-we-want-to-ensure-that-urls-at-the-maximum-allowed-length-are-accepted-properly-by-the-validation-system-while-urls-that-exceed-this-limit-are-rejected-appropriately-which-is-important-for-maintaining-proper-validation-boundaries-in-production-systems-where-configuration-parameters-must-be-validated-correctly-to-prevent-system-failures-or-unexpected-behavior-that-could-impact-application-functionality-and-user-experience-in-various-deployment-environments-including-development-staging-and-production-kubernetes-clusters-running-across-different-cloud-providers-and-on-premises-infrastructure-where-proxy-configurations-are-commonly-used-for-network-security-and-compliance-requirements-that-organizations-need-to-meet-for-their-business-operations-and-regulatory-obligations-in-different-geographical-regions-around-the-world-where-various-network-policies-and-security-measures-are-implemented-to-protect-sensitive-data-and-ensure-proper-access-control-for-applications-and-services-that-handle-confidential-information-and-business-critical-processes-that-must-operate-reliably-and-securely-at-all-times-without-interruption-or-performance-degradation-that-could-affect-end-users-and-customers-who-depend-on-these-systems-for-their-daily-activities-and-business-needs-which-makes-proper-validation-of-configuration-parameters-like-proxy-urls-essential-for-maintaining-system-stability-and-security-in-production-environments-where-any-configuration-error-could-have-significant-consequences-for-business-continuity-and-customer-satisfaction-which-is-why-we-implement-comprehensive-boundary-testing-to-ensure-that-all-validation-rules-work-correctly-at-their-specified-limits-and-provide-clear-error-messages-when-those-limits-are-exceeded.example.com:8443"
+    - name: Should accept empty proxy configuration
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            proxy:
+              httpProxy: ""
+              httpsProxy: ""
+              noProxy: ""
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 1
+            proxy:
+              httpProxy: ""
+              httpsProxy: ""
+              noProxy: ""
+  onUpdate:
+    - name: Should be able to add global config after creation
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec: {}
+      updated: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            labels:
+              "environment": "production"
+            logLevel: 3
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            labels:
+              "environment": "production"
+            logLevel: 3
+    - name: Should be able to update global labels
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            labels:
+              "team": "platform"
+      updated: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            labels:
+              "team": "platform"
+              "environment": "staging"
+              "version": "v2.0.0"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 1
+            labels:
+              "team": "platform"
+              "environment": "staging"
+              "version": "v2.0.0"
+    - name: Should be able to update resource requirements
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 2
+            resources:
+              requests:
+                cpu: "100m"
+                memory: "128Mi"
+      updated: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 2
+            resources:
+              requests:
+                cpu: "200m"
+                memory: "256Mi"
+              limits:
+                cpu: "500m"
+                memory: "512Mi"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 2
+            resources:
+              requests:
+                cpu: "200m"
+                memory: "256Mi"
+              limits:
+                cpu: "500m"
+                memory: "512Mi"
+    - name: Should be able to add proxy configuration
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 2
+      updated: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 2
+            proxy:
+              httpProxy: "http://proxy.company.com:3128"
+              httpsProxy: "https://proxy.company.com:3128"
+              noProxy: "localhost,127.0.0.1,.company.com"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 2
+            proxy:
+              httpProxy: "http://proxy.company.com:3128"
+              httpsProxy: "https://proxy.company.com:3128"
+              noProxy: "localhost,127.0.0.1,.company.com"
+    - name: Should be able to update node selector and tolerations
+      resourceName: cluster
+      initial: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 2
+            nodeSelector:
+              "kubernetes.io/arch": "amd64"
+      updated: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 2
+            nodeSelector:
+              "kubernetes.io/arch": "amd64"
+              "node-type": "worker"
+            tolerations:
+              - key: "node-role.kubernetes.io/master"
+                operator: "Exists"
+                effect: "NoSchedule"
+      expected: |
+        apiVersion: operator.openshift.io/v1alpha1
+        kind: ExternalSecretsManager
+        spec:
+          globalConfig:
+            logLevel: 2
+            nodeSelector:
+              "kubernetes.io/arch": "amd64"
+              "node-type": "worker"
+            tolerations:
+              - key: "node-role.kubernetes.io/master"
+                operator: "Exists"
+                effect: "NoSchedule"
\ No newline at end of file
diff --git a/hack/go-fips.sh b/hack/go-fips.sh
old mode 100644
new mode 100755
diff --git a/hack/test-apis.sh b/hack/test-apis.sh
new file mode 100755
index 00000000..c8a4495a
--- /dev/null
+++ b/hack/test-apis.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+
+set -o nounset
+set -o pipefail
+set -o errexit
+
+REPO_ROOT=$(git rev-parse --show-toplevel)
+
+OPENSHIFT_CI=${OPENSHIFT_CI:-""}
+ARTIFACT_DIR=${ARTIFACT_DIR:-""}
+GINKGO=${REPO_ROOT}/bin/ginkgo
+GINKGO_ARGS=${GINKGO_ARGS:-"-r -v --randomize-all --randomize-suites --keep-going --timeout=30m"}
+GINKGO_EXTRA_ARGS=${GINKGO_EXTRA_ARGS:-""}
+
+# Ensure that some home var is set and that it's not the root.
+# This is required for the kubebuilder cache.
+export HOME=${HOME:=/tmp/kubebuilder-testing}
+if [ "${HOME}" == "/" ]; then
+  export HOME=/tmp/kubebuilder-testing
+fi
+
+if [ "$OPENSHIFT_CI" == "true" ] && [ -n "$ARTIFACT_DIR" ] && [ -d "$ARTIFACT_DIR" ]; then # detect ci environment there
+  GINKGO_ARGS="${GINKGO_ARGS} --junit-report=junit_api_example_tests.xml --cover --output-dir=${ARTIFACT_DIR} --no-color"
+fi
+
+# Print the command we are going to run as Make would.
+echo ${GINKGO} ${GINKGO_ARGS} ${GINKGO_EXTRA_ARGS} ${REPO_ROOT}/test/apis
+${GINKGO} ${GINKGO_ARGS} ${GINKGO_EXTRA_ARGS} ${REPO_ROOT}/test/apis
+# Capture the test result and make the exit status explicit (errexit already aborts on failure).
+TEST_RESULT=$?
+
+# Ensure we exit based on the test result
+exit ${TEST_RESULT}
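
For local runs, the GINKGO_EXTRA_ARGS hook above can be used to pass additional flags through without editing the script. A hypothetical invocation (the focus expression is only an example, and it assumes the ginkgo binary has already been built into bin/ of the repository):

  # Run only the suites whose description matches the focus regex (standard Ginkgo v2 flag).
  GINKGO_EXTRA_ARGS="--focus=ExternalSecretsManager" ./hack/test-apis.sh
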
diff --git a/test/apis/README.md b/test/apis/README.md
new file mode 100644
index 00000000..4fd7fcac
--- /dev/null
+++ b/test/apis/README.md
@@ -0,0 +1 @@
+Refer to https://github.com/openshift/api/tree/master/tests for more details.
\ No newline at end of file
diff --git a/test/apis/generator.go b/test/apis/generator.go
new file mode 100644
index 00000000..51b62d36
--- /dev/null
+++ b/test/apis/generator.go
@@ -0,0 +1,586 @@
+package apis
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path/filepath"
+	"strings"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	yamlpatch "github.com/vmware-archive/yaml-patch"
+
+	"github.com/ghodss/yaml"
+
+	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	k8syaml "k8s.io/apimachinery/pkg/util/yaml"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/envtest/komega"
+)
+
+// LoadTestSuiteSpecs recursively walks the given paths looking for any file with the suffix `.testsuite.yaml`.
+// It then loads these files in SuiteSpec structs ready for the generator to generate the test cases.
+func LoadTestSuiteSpecs(paths ...string) ([]SuiteSpec, error) {
+	suiteFiles := make(map[string]struct{})
+
+	for _, path := range paths {
+		if err := filepath.Walk(path, func(path string, info os.FileInfo, err error) error {
+			if err != nil {
+				return err
+			}
+
+			dirPath := filepath.Base(filepath.Dir(filepath.Dir(path)))
+			if !info.IsDir() && strings.HasSuffix(path, ".testsuite.yaml") && dirPath == "tests" {
+				suiteFiles[path] = struct{}{}
+			}
+
+			return nil
+		}); err != nil {
+			return nil, fmt.Errorf("could not load files from path %q: %w", path, err)
+		}
+	}
+
+	var out []SuiteSpec
+	for path := range suiteFiles {
+		suite, err := loadSuiteFile(path)
+		if err != nil {
+			return nil, fmt.Errorf("could not set up test suite: %w", err)
+		}
+
+		out = append(out, suite)
+	}
+
+	return out, nil
+}
+
+// loadSuiteFile loads an individual SuiteSpec from the given file name.
+func loadSuiteFile(path string) (SuiteSpec, error) {
+	raw, err := os.ReadFile(path)
+	if err != nil {
+		return SuiteSpec{}, fmt.Errorf("could not read file %q: %w", path, err)
+	}
+
+	s := SuiteSpec{}
+	if err := yaml.Unmarshal(raw, &s); err != nil {
+		return SuiteSpec{}, fmt.Errorf("could not unmarshal YAML file %q: %w", path, err)
+	}
+
+	if len(s.CRDName) == 0 {
+		return SuiteSpec{}, fmt.Errorf("test suite spec %q is invalid: missing required field `crdName`", path)
+	}
+
+	s.PerTestRuntimeInfo, err = perTestRuntimeInfo(filepath.Dir(path), s.CRDName)
+	if err != nil {
+		return SuiteSpec{}, fmt.Errorf("unable to determine which CRD files to use: %w", err)
+	}
+	if len(s.PerTestRuntimeInfo.CRDFilenames) == 0 {
+		return SuiteSpec{}, fmt.Errorf("missing CRD files to use for test %v", path)
+	}
+
+	if s.Version == "" {
+		version, err := getSuiteSpecTestVersion(s)
+		if err != nil {
+			return SuiteSpec{}, fmt.Errorf("could not determine test suite CRD version for %q: %w", path, err)
+		}
+		s.Version = version
+	}
+
+	return s, nil
+}
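
For reference, a minimal suite file accepted by the loader above would look roughly like the sketch below; the suite name and the exact expected output are illustrative, while the top-level keys mirror the SuiteSpec and OnCreateTestSpec fields defined in test/apis/vars.go:

  name: ExternalSecretsManager API validation
  crdName: externalsecretsmanagers.operator.openshift.io
  tests:
    onCreate:
      - name: Should accept an empty spec
        resourceName: cluster
        initial: |
          apiVersion: operator.openshift.io/v1alpha1
          kind: ExternalSecretsManager
          spec: {}
        expected: |
          apiVersion: operator.openshift.io/v1alpha1
          kind: ExternalSecretsManager
          spec: {}

Whether defaulted fields appear in the expected block depends on the defaulting rules in the CRD schema, so the expected YAML usually needs to be adjusted per test.
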
+
+// GenerateTestSuite generates a Ginkgo test suite from the provided SuiteSpec.
+func GenerateTestSuite(suiteSpec SuiteSpec) {
+	for i := range suiteSpec.PerTestRuntimeInfo.CRDFilenames {
+		crdFilename := suiteSpec.PerTestRuntimeInfo.CRDFilenames[i]
+
+		baseCRD, err := loadVersionedCRD(suiteSpec, crdFilename)
+		Expect(err).ToNot(HaveOccurred())
+
+		suiteName, err := generateSuiteName(suiteSpec, crdFilename)
+		Expect(err).ToNot(HaveOccurred())
+
+		Describe(suiteName, Ordered, func() {
+			var crdOptions envtest.CRDInstallOptions
+			var crd *apiextensionsv1.CustomResourceDefinition
+
+			BeforeEach(OncePerOrdered, func() {
+				Expect(k8sClient).ToNot(BeNil(), "Kubernetes client is not initialised")
+
+				crdOptions = envtest.CRDInstallOptions{
+					CRDs: []*apiextensionsv1.CustomResourceDefinition{
+						baseCRD.DeepCopy(),
+					},
+				}
+
+				crds, err := envtest.InstallCRDs(cfg, crdOptions)
+				Expect(err).ToNot(HaveOccurred())
+
+				Expect(crds).To(HaveLen(1), "Only one CRD should have been installed")
+				crd = crds[0]
+
+				Expect(envtest.WaitForCRDs(cfg, crds, crdOptions)).To(Succeed())
+			})
+
+			AfterEach(func() {
+				// Remove all the resources we created during the test.
+				for _, u := range newUnstructuredsFor(crd) {
+					Expect(k8sClient.DeleteAllOf(ctx, u, client.InNamespace("default"))).To(Succeed())
+				}
+			})
+
+			AfterEach(OncePerOrdered, func() {
+				// Remove the CRD and wait for it to be removed from the API.
+				// If we don't wait then subsequent tests may fail.
+				Expect(envtest.UninstallCRDs(cfg, crdOptions)).ToNot(HaveOccurred())
+				Eventually(komega.Get(crd)).Should(Not(Succeed()))
+			})
+
+			generateOnCreateTable(suiteSpec.Tests.OnCreate)
+			generateOnUpdateTable(suiteSpec.Tests.OnUpdate, crdFilename)
+		})
+	}
+}
+
+// generateOnCreateTable generates a table of tests from the defined OnCreate tests
+// within the test suite test spec.
+func generateOnCreateTable(onCreateTests []OnCreateTestSpec) {
+	type onCreateTableInput struct {
+		initial         []byte
+		expected        []byte
+		expectedError   string
+		resourceName    string
+		useGenerateName bool
+	}
+
+	// assertOnCreate runs the actual test for each table entry
+	var assertOnCreate interface{} = func(in onCreateTableInput) {
+		initialObj, err := newUnstructuredFrom(in.initial, in.resourceName, in.useGenerateName)
+		Expect(err).ToNot(HaveOccurred(), "initial data should be a valid Kubernetes YAML resource")
+
+		err = k8sClient.Create(ctx, initialObj)
+		if in.expectedError != "" {
+			Expect(err).To(MatchError(ContainSubstring(in.expectedError)))
+			return
+		}
+		Expect(err).ToNot(HaveOccurred())
+
+		// Fetch the object we just created from the API.
+		gotObj := newEmptyUnstructuredFrom(initialObj)
+		Expect(k8sClient.Get(ctx, objectKey(initialObj), gotObj)).To(Succeed())
+
+		expectedObj, err := newUnstructuredFrom(in.expected, in.resourceName, in.useGenerateName)
+		Expect(err).ToNot(HaveOccurred(), "expected data should be a valid Kubernetes YAML resource when no expected error is provided")
+
+		// Ensure the name and namespace match.
+		// The IgnoreAutogeneratedMetadata will ignore any additional meta set in the API.
+		expectedObj.SetName(gotObj.GetName())
+		expectedObj.SetNamespace(gotObj.GetNamespace())
+
+		Expect(gotObj).To(komega.EqualObject(expectedObj, komega.IgnoreAutogeneratedMetadata))
+	}
+
+	// First argument to the table is the test function.
+	tableEntries := []interface{}{assertOnCreate}
+
+	// Convert the test specs into table entries
+	for _, testEntry := range onCreateTests {
+		tableEntries = append(tableEntries, Entry(testEntry.Name, onCreateTableInput{
+			initial:         []byte(testEntry.Initial),
+			expected:        []byte(testEntry.Expected),
+			expectedError:   testEntry.ExpectedError,
+			resourceName:    testEntry.ResourceName,
+			useGenerateName: testEntry.UseGenerateName,
+		}))
+	}
+
+	if len(tableEntries) > 1 {
+		DescribeTable("On Create", tableEntries...)
+	}
+}
+
+// generateOnUpdateTable generates a table of tests from the defined OnUpdate tests
+// within the test suite test spec.
+func generateOnUpdateTable(onUpdateTests []OnUpdateTestSpec, crdFileName string) {
+	type onUpdateTableInput struct {
+		crdPatches          []Patch
+		initial             []byte
+		updated             []byte
+		expected            []byte
+		expectedError       string
+		expectedStatusError string
+		resourceName        string
+		useGenerateName     bool
+	}
+
+	var assertOnUpdate interface{} = func(in onUpdateTableInput) {
+		var originalCRDObjectKey client.ObjectKey
+		var originalCRDSpec apiextensionsv1.CustomResourceDefinitionSpec
+
+		initialObj, err := newUnstructuredFrom(in.initial, in.resourceName, in.useGenerateName)
+		Expect(err).ToNot(HaveOccurred(), "initial data should be a valid Kubernetes YAML resource")
+
+		if len(in.crdPatches) > 0 {
+			patchedCRD, err := getPatchedCRD(crdFileName, in.crdPatches)
+			Expect(err).ToNot(HaveOccurred(), "could not load patched crd")
+
+			originalCRDObjectKey = objectKey(patchedCRD)
+
+			originalCRD := &apiextensionsv1.CustomResourceDefinition{}
+			Expect(k8sClient.Get(ctx, originalCRDObjectKey, originalCRD)).To(Succeed())
+
+			originalCRDSpec = *originalCRD.Spec.DeepCopy()
+			originalCRD.Spec = patchedCRD.Spec
+
+			// Add a sentinel field so that we can check that the schema update has persisted.
+			originalCRD.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties["sentinel"] = apiextensionsv1.JSONSchemaProps{
+				Type: "string",
+				Enum: []apiextensionsv1.JSON{
+					{Raw: []byte(fmt.Sprintf(`"%s+patched"`, initialObj.GetUID()))},
+				},
+			}
+			initialObj.Object["sentinel"] = string(initialObj.GetUID()) + "+patched"
+
+			Expect(k8sClient.Update(ctx, originalCRD)).To(Succeed(), "failed updating patched CRD schema")
+		}
+
+		initialStatus, ok, err := unstructured.NestedFieldNoCopy(initialObj.Object, "status")
+		Expect(err).ToNot(HaveOccurred())
+		if ok {
+			Expect(initialStatus).ToNot(BeNil())
+		}
+
+		// Use an Eventually here, so that we retry until the sentinel correctly applies.
+		Eventually(func() error {
+			return k8sClient.Create(ctx, initialObj)
+		}).Should(Succeed(), "initial object should create successfully")
+
+		if initialStatus != nil {
+			Expect(unstructured.SetNestedField(initialObj.Object, initialStatus, "status")).To(Succeed(), "should be able to restore initial status")
+			Expect(k8sClient.Status().Update(ctx, initialObj)).ToNot(HaveOccurred(), "initial object status should update successfully")
+		}
+
+		if len(in.crdPatches) > 0 {
+			originalCRD := &apiextensionsv1.CustomResourceDefinition{}
+			Expect(k8sClient.Get(ctx, originalCRDObjectKey, originalCRD)).To(Succeed())
+
+			originalCRD.Spec = originalCRDSpec
+
+			// Update the sentinel value so that we can check that the schema restore has persisted.
+			originalCRD.Spec.Versions[0].Schema.OpenAPIV3Schema.Properties["sentinel"] = apiextensionsv1.JSONSchemaProps{
+				Type: "string",
+				Enum: []apiextensionsv1.JSON{
+					{Raw: []byte(fmt.Sprintf(`"%s+restored"`, initialObj.GetUID()))},
+				},
+			}
+
+			Expect(k8sClient.Update(ctx, originalCRD)).To(Succeed())
+
+			Eventually(func() error {
+				updatedObj := initialObj.DeepCopy()
+				updatedObj.Object["sentinel"] = string(initialObj.GetUID()) + "+restored"
+
+				return k8sClient.Update(ctx, updatedObj)
+			}).Should(Succeed(), "Sentinel should be persisted")
+
+			// Drop the sentinel field now that we know the rest of the CRD schema is up to date.
+			originalCRD.Spec = originalCRDSpec
+			Expect(k8sClient.Update(ctx, originalCRD)).To(Succeed())
+		}
+
+		// Fetch the object we just created from the API.
+		gotObj := newEmptyUnstructuredFrom(initialObj)
+		Expect(k8sClient.Get(ctx, objectKey(initialObj), gotObj)).To(Succeed())
+
+		updatedObj, err := newUnstructuredFrom(in.updated, in.resourceName, in.useGenerateName)
+		Expect(err).ToNot(HaveOccurred(), "updated data should be a valid Kubernetes YAML resource")
+
+		updatedObjStatus, ok, err := unstructured.NestedFieldNoCopy(updatedObj.Object, "status")
+		Expect(err).ToNot(HaveOccurred())
+		if ok {
+			Expect(updatedObjStatus).ToNot(BeNil())
+		}
+
+		// The updated object needs the following fields copied over.
+		updatedObj.SetName(gotObj.GetName())
+		updatedObj.SetNamespace(gotObj.GetNamespace())
+		updatedObj.SetResourceVersion(gotObj.GetResourceVersion())
+
+		err = k8sClient.Update(ctx, updatedObj)
+		if in.expectedError != "" {
+			Expect(err).To(MatchError(ContainSubstring(in.expectedError)))
+			return
+		}
+		Expect(err).ToNot(HaveOccurred(), "unexpected error updating spec")
+
+		if updatedObjStatus != nil {
+			Expect(unstructured.SetNestedField(updatedObj.Object, updatedObjStatus, "status")).To(Succeed(), "should be able to restore updated status")
+
+			err := k8sClient.Status().Update(ctx, updatedObj)
+			if in.expectedStatusError != "" {
+				Expect(err).To(MatchError(ContainSubstring(in.expectedStatusError)))
+				return
+			}
+			Expect(err).ToNot(HaveOccurred(), "unexpected error updating status")
+		}
+
+		Expect(k8sClient.Get(ctx, objectKey(initialObj), gotObj)).To(Succeed())
+
+		expectedObj, err := newUnstructuredFrom(in.expected, in.resourceName, in.useGenerateName)
+		Expect(err).ToNot(HaveOccurred(), "expected data should be a valid Kubernetes YAML resource when no expected error is provided")
+
+		// Ensure the name and namespace match.
+		// The IgnoreAutogeneratedMetadata will ignore any additional meta set in the API.
+		expectedObj.SetName(gotObj.GetName())
+		expectedObj.SetNamespace(gotObj.GetNamespace())
+
+		Expect(gotObj).To(komega.EqualObject(expectedObj, komega.IgnoreAutogeneratedMetadata))
+	}
+
+	// First argument to the table is the test function.
+	tableEntries := []interface{}{assertOnUpdate}
+
+	// Convert the test specs into table entries
+	for _, testEntry := range onUpdateTests {
+		tableEntries = append(tableEntries, Entry(testEntry.Name, onUpdateTableInput{
+			crdPatches:          testEntry.InitialCRDPatches,
+			initial:             []byte(testEntry.Initial),
+			updated:             []byte(testEntry.Updated),
+			expected:            []byte(testEntry.Expected),
+			expectedError:       testEntry.ExpectedError,
+			expectedStatusError: testEntry.ExpectedStatusError,
+			resourceName:        testEntry.ResourceName,
+			useGenerateName:     testEntry.UseGenerateName,
+		}))
+	}
+
+	if len(tableEntries) > 1 {
+		DescribeTable("On Update", tableEntries...)
+	}
+}
+
+// newUnstructuredsFor creates a set of unstructured resources for each version of the CRD.
+// This allows us to ensure all CR instances are deleted after each test.
+func newUnstructuredsFor(crd *apiextensionsv1.CustomResourceDefinition) []*unstructured.Unstructured {
+	var out []*unstructured.Unstructured
+
+	for _, version := range crd.Spec.Versions {
+		out = append(out, newUnstructuredsForVersion(crd, version.Name))
+	}
+
+	return out
+}
+
+// newUnstructuredsForVersion creates an unstructured resource for the CRD at a given version.
+func newUnstructuredsForVersion(crd *apiextensionsv1.CustomResourceDefinition, version string) *unstructured.Unstructured {
+	u := &unstructured.Unstructured{}
+
+	u.SetAPIVersion(fmt.Sprintf("%s/%s", crd.Spec.Group, version))
+	u.SetKind(crd.Spec.Names.Kind)
+
+	return u
+}
+
+// newUnstructuredFrom unmarshals the raw YAML data into an unstructured,
+// and then sets the namespace and generateName ahead of the test.
+func newUnstructuredFrom(raw []byte, resourceName string, useGenerateName bool) (*unstructured.Unstructured, error) {
+	u := &unstructured.Unstructured{}
+
+	if err := k8syaml.Unmarshal(raw, &u.Object); err != nil {
+		return nil, fmt.Errorf("could not unmarshal raw YAML: %w", err)
+	}
+
+	// Names should be unique for each test, so use generateName when requested.
+	if useGenerateName {
+		u.SetGenerateName(resourceName)
+	} else {
+		u.SetName(resourceName)
+	}
+	// We need to have a namespace, use the default.
+	u.SetNamespace("default")
+
+	return u, nil
+}
+
+// newEmptyUnstructuredFrom creates a new unstructured object with the same GVK as the input object;
+// all other fields are left empty.
+func newEmptyUnstructuredFrom(initial *unstructured.Unstructured) *unstructured.Unstructured {
+	u := &unstructured.Unstructured{}
+
+	if initial != nil {
+		u.GetObjectKind().SetGroupVersionKind(initial.GetObjectKind().GroupVersionKind())
+	}
+
+	return u
+}
+
+// objectKey extracts a client.ObjectKey from the given object.
+func objectKey(obj client.Object) client.ObjectKey {
+	return client.ObjectKey{Namespace: obj.GetNamespace(), Name: obj.GetName()}
+}
+
+func loadCRDFromFile(filename string) (*apiextensionsv1.CustomResourceDefinition, error) {
+	raw, err := os.ReadFile(filename)
+	if err != nil {
+		return nil, fmt.Errorf("could not load CRD: %w", err)
+	}
+
+	crd := &apiextensionsv1.CustomResourceDefinition{}
+	if err := yaml.Unmarshal(raw, crd); err != nil {
+		return nil, fmt.Errorf("could not unmarshal CRD: %w", err)
+	}
+
+	return crd, nil
+}
+
+// loadVersionedCRD loads the CRD and removes any version schema that is not the current suite
+// version. This allows testing of CRDs for versions that are not currently the storage version.
+func loadVersionedCRD(suiteSpec SuiteSpec, crdFilename string) (*apiextensionsv1.CustomResourceDefinition, error) {
+	crd, err := loadCRDFromFile(crdFilename)
+	if err != nil {
+		return nil, fmt.Errorf("could not load CRD: %w", err)
+	}
+
+	if suiteSpec.Version == "" {
+		return crd, nil
+	}
+
+	var crdVersions []apiextensionsv1.CustomResourceDefinitionVersion
+
+	for _, version := range crd.Spec.Versions {
+		if version.Name != suiteSpec.Version {
+			continue
+		}
+
+		version.Storage = true
+		version.Served = true
+
+		crdVersions = append(crdVersions, version)
+	}
+
+	if len(crdVersions) == 0 {
+		return nil, fmt.Errorf("could not find CRD version matching version %s", suiteSpec.Version)
+	}
+
+	crd.Spec.Versions = crdVersions
+
+	return crd, nil
+}
+
+// generateSuiteName prepends the specified suite name with the GVR string
+// for the CRD under test.
+func generateSuiteName(suiteSpec SuiteSpec, crdFilename string) (string, error) {
+	crd, err := loadCRDFromFile(crdFilename)
+	if err != nil {
+		return "", fmt.Errorf("could not load CRD: %w", err)
+	}
+	filename := filepath.Base(crdFilename)
+
+	gvr := schema.GroupVersionResource{
+		Group:    crd.Spec.Group,
+		Resource: crd.Spec.Names.Plural,
+		Version:  suiteSpec.Version,
+	}
+
+	return fmt.Sprintf(
+		"[%s][File=%v] %s",
+		gvr.String(),
+		filename,
+		suiteSpec.Name,
+	), nil
+}
+
+// getSuiteSpecTestVersion is used to populate the test suite's version
+// field when not set.
+// This is then used to set storage and served versions as well as
+// to generate the test suite name.
+func getSuiteSpecTestVersion(suiteSpec SuiteSpec) (string, error) {
+	version := ""
+	for _, file := range suiteSpec.PerTestRuntimeInfo.CRDFilenames {
+		crd, err := loadCRDFromFile(file)
+		if err != nil {
+			return "", err
+		}
+		if len(crd.Spec.Versions) > 1 {
+			return "", fmt.Errorf("too many versions, specify one in the suite")
+		}
+		if len(version) == 0 {
+			version = crd.Spec.Versions[0].Name
+			continue
+		}
+
+		if version != crd.Spec.Versions[0].Name {
+			return "", fmt.Errorf("too many versions, specify one in the suite.  Saw %v and %v", version, crd.Spec.Versions[0].Name)
+		}
+	}
+
+	return version, nil
+}
+
+func getPatchedCRD(crdFileName string, patches []Patch) (*apiextensionsv1.CustomResourceDefinition, error) {
+	patch := yamlpatch.Patch{}
+
+	for _, p := range patches {
+		patch = append(patch, yamlpatch.Operation{
+			Op:    yamlpatch.Op(p.Op),
+			Path:  yamlpatch.OpPath(p.Path),
+			Value: yamlpatch.NewNode(p.Value),
+		})
+	}
+
+	baseDoc, err := os.ReadFile(crdFileName)
+	if err != nil {
+		return nil, fmt.Errorf("could not read file %q: %w", crdFileName, err)
+	}
+
+	patchedDoc, err := patch.Apply(baseDoc)
+	if err != nil {
+		return nil, fmt.Errorf("could not apply patch: %w", err)
+	}
+
+	placeholderWrapper := yamlpatch.NewPlaceholderWrapper("{{", "}}")
+	patchedData := bytes.NewBuffer(placeholderWrapper.Unwrap(patchedDoc))
+
+	crd := &apiextensionsv1.CustomResourceDefinition{}
+	if err := yaml.Unmarshal(patchedData.Bytes(), crd); err != nil {
+		return nil, fmt.Errorf("could not unmarshal CRD: %w", err)
+	}
+
+	return crd, nil
+}
+
+func perTestRuntimeInfo(suitePath, crdName string) (*PerTestRuntimeInfo, error) {
+	var crdFilesToCheck []string
+	relativePathForCRDs := filepath.Join(suitePath, "..", "..", "..", "..", "config", "crd", "bases")
+
+	generatedCRDs, err := os.ReadDir(relativePathForCRDs)
+	if err != nil {
+		return nil, err
+	}
+	for _, currCRDFile := range generatedCRDs {
+		relativeFilename := filepath.Join(relativePathForCRDs, currCRDFile.Name())
+		filename, err := filepath.Abs(relativeFilename)
+		if err != nil {
+			return nil, fmt.Errorf("could not generate absolute path for %q: %w", relativeFilename, err)
+		}
+
+		currCRD, err := loadCRDFromFile(filename)
+		if err != nil {
+			// not all files are CRDs, verify will catch garbage.
+			continue
+		}
+		if currCRD.Name == crdName {
+			crdFilesToCheck = append(crdFilesToCheck, filename)
+			break
+		}
+		continue
+	}
+
+	ret := &PerTestRuntimeInfo{
+		CRDFilenames: crdFilesToCheck,
+	}
+	return ret, nil
+}
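
Taken together, LoadTestSuiteSpecs and perTestRuntimeInfo assume a layout in which every suite file's grandparent directory is named tests and sits inside the API tree, while the generated CRD manifests live under config/crd/bases at the repository root. A sketch of that layout (the directory and file names under tests/ are illustrative; the CRD manifest name follows the usual <group>_<plural>.yaml convention):

  api/
    v1alpha1/
      tests/
        externalsecretsmanagers.operator.openshift.io/
          validation.testsuite.yaml
  config/
    crd/
      bases/
        operator.openshift.io_externalsecretsmanagers.yaml
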
diff --git a/test/apis/suite_test.go b/test/apis/suite_test.go
new file mode 100644
index 00000000..4a2f131a
--- /dev/null
+++ b/test/apis/suite_test.go
@@ -0,0 +1,86 @@
+package apis
+
+import (
+	"fmt"
+	"path/filepath"
+	"strconv"
+	"strings"
+	"testing"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+
+	"k8s.io/client-go/discovery"
+	"k8s.io/client-go/kubernetes/scheme"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	"sigs.k8s.io/controller-runtime/pkg/envtest/komega"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+
+	operatorv1alpha1 "github.com/openshift/external-secrets-operator/api/v1alpha1"
+	//+kubebuilder:scaffold:imports
+)
+
+func TestAPIs(t *testing.T) {
+	RegisterFailHandler(Fail)
+
+	g := NewGomegaWithT(t)
+
+	var err error
+	suites, err = LoadTestSuiteSpecs(filepath.Join("../../api"))
+	g.Expect(err).ToNot(HaveOccurred())
+
+	RunSpecs(t, "API Integration Suite")
+}
+
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	By("bootstrapping test environment")
+	testEnv = &envtest.Environment{
+		CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")},
+	}
+
+	var err error
+	cfg, err = testEnv.Start()
+	Expect(err).NotTo(HaveOccurred())
+	Expect(cfg).NotTo(BeNil())
+
+	testScheme = scheme.Scheme
+	Expect(operatorv1alpha1.AddToScheme(testScheme)).To(Succeed())
+
+	//+kubebuilder:scaffold:scheme
+
+	k8sClient, err = client.New(cfg, client.Options{Scheme: testScheme})
+	Expect(err).NotTo(HaveOccurred())
+	Expect(k8sClient).NotTo(BeNil())
+
+	// CEL requires Kube 1.25 and above, so check for the minimum server version.
+	discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg)
+	Expect(err).ToNot(HaveOccurred())
+
+	serverVersion, err := discoveryClient.ServerVersion()
+	Expect(err).ToNot(HaveOccurred())
+
+	Expect(serverVersion.Major).To(Equal("1"))
+
+	minorInt, err := strconv.Atoi(strings.Split(serverVersion.Minor, "+")[0])
+	Expect(err).ToNot(HaveOccurred())
+	Expect(minorInt).To(BeNumerically(">=", 25), fmt.Sprintf("This test suite requires a Kube API server of at least version 1.25, current version is 1.%s", serverVersion.Minor))
+
+	komega.SetClient(k8sClient)
+	komega.SetContext(ctx)
+})
+
+var _ = AfterSuite(func() {
+	By("tearing down the test environment")
+	err := testEnv.Stop()
+	Expect(err).NotTo(HaveOccurred())
+})
+
+var _ = Describe("API Integration Tests", Ordered, ContinueOnFailure, func() {
+	for _, suite := range suites {
+		GenerateTestSuite(suite)
+	}
+})
diff --git a/test/apis/vars.go b/test/apis/vars.go
new file mode 100644
index 00000000..78ebc1a1
--- /dev/null
+++ b/test/apis/vars.go
@@ -0,0 +1,143 @@
+package apis
+
+import (
+	"context"
+
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/client-go/rest"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+)
+
+/*
+ * Content taken from https://github.com/openshift/api/tree/master/tests
+ */
+
+var cfg *rest.Config
+var k8sClient client.Client
+var testEnv *envtest.Environment
+var testScheme *runtime.Scheme
+var ctx = context.Background()
+var suites []SuiteSpec
+
+// SuiteSpec defines a test suite specification.
+type SuiteSpec struct {
+	// Name is the name of the test suite.
+	Name string `json:"name"`
+
+	CRDName string `json:"crdName"`
+
+	// Version is the version of the CRD under test in this file.
+	// When omitted, if there is a single version in the CRD, this is assumed to be the correct version.
+	// If there are multiple versions within the CRD, the version must be specified explicitly in the suite.
+	Version string `json:"version,omitempty"`
+
+	// Tests defines the test cases to run for this test suite.
+	Tests TestSpec `json:"tests"`
+
+	// PerTestRuntimeInfo cannot be specified in the test case itself; it is computed at runtime.
+	PerTestRuntimeInfo *PerTestRuntimeInfo `json:"-"`
+}
+
+// TestSpec defines the test specs for individual tests in this suite.
+type TestSpec struct {
+	// OnCreate defines a list of on create style tests.
+	OnCreate []OnCreateTestSpec `json:"onCreate"`
+
+	// OnUpdate defines a list of on update style tests.
+	OnUpdate []OnUpdateTestSpec `json:"onUpdate"`
+}
+
+// OnCreateTestSpec defines an individual test case for the on create style tests.
+type OnCreateTestSpec struct {
+	// Name is the name of this test case.
+	Name string `json:"name"`
+
+	// ResourceName is the name to be used for the resource under test.
+	ResourceName string `json:"resourceName"`
+
+	// UseGenerateName indicates whether ResourceName should be used as a generateName prefix, with a random suffix appended.
+	UseGenerateName bool `json:"useGenerateName"`
+
+	// Initial is a literal string containing the initial YAML content from which to
+	// create the resource.
+	// Note `apiVersion` and `kind` fields are required though `metadata` can be omitted.
+	// Typically, only the `spec` will vary from test to test.
+	Initial string `json:"initial"`
+
+	// ExpectedError defines the error string that should be returned when the initial resource is invalid.
+	// This will be matched as a substring of the actual error when non-empty.
+	ExpectedError string `json:"expectedError"`
+
+	// Expected is a literal string containing the expected YAML content that should be
+	// persisted when the resource is created.
+	// Note `apiVersion` and `kind` fields are required though `metadata` can be omitted.
+	// Typically, only the `spec` will vary from test to test.
+	Expected string `json:"expected"`
+}
+
+type PerTestRuntimeInfo struct {
+	// CRDFilenames indicates all the CRD filenames that this test applies to.  Remember that tests can apply to multiple
+	// files depending on whether their gates are included in each one.
+	CRDFilenames []string `json:"-"`
+}
+
+// OnUpdateTestSpec defines an individual test case for the on update style tests.
+type OnUpdateTestSpec struct {
+	// Name is the name of this test case.
+	Name string `json:"name"`
+
+	// ResourceName is the name to be used for the resource under test.
+	ResourceName string `json:"resourceName"`
+
+	// UseGenerateName indicates whether ResourceName should be used as a generateName prefix, with a random suffix appended.
+	UseGenerateName bool `json:"useGenerateName"`
+
+	// InitialCRDPatches is a list of YAML patches to apply to the CRD before applying
+	// the initial version of the resource.
+	// Once the initial version has been applied, the CRD will be restored to its
+	// original state before the updated object is applied.
+	// This can be used to test ratcheting validation of CRD schema changes over time.
+	InitialCRDPatches []Patch `json:"initialCRDPatches"`
+
+	// Initial is a literal string containing the initial YAML content from which to
+	// create the resource.
+	// Note `apiVersion` and `kind` fields are required though `metadata` can be omitted.
+	// Typically, only the `spec` will vary from test to test.
+	Initial string `json:"initial"`
+
+	// Updated is a literal string containing the updated YAML content from which to
+	// update the resource.
+	// Note `apiVersion` and `kind` fields are required though `metadata` can be omitted.
+	// Typically, only the `spec` will vary from test to test.
+	Updated string `json:"updated"`
+
+	// ExpectedError defines the error string that should be returned when the updated resource is invalid.
+	// This will be matched as a substring of the actual error when non-empty.
+	ExpectedError string `json:"expectedError"`
+
+	// ExpectedStatusError defines the error string that should be returned when the updated resource status is invalid.
+	// This will be matched as a substring of the actual error when non-empty.
+	ExpectedStatusError string `json:"expectedStatusError"`
+
+	// Expected is a literal string containing the expected YAML content that should be
+	// persisted when the resource is updated.
+	// Note `apiVersion` and `kind` fields are required though `metadata` can be omitted.
+	// Typically, only the `spec` will vary from test to test.
+	Expected string `json:"expected"`
+}
+
+// Patch represents a single operation to be applied to a YAML document.
+// It follows the JSON Patch format as defined in RFC 6902.
+// Each patch operation is atomic and can be used to modify the structure
+// or content of a YAML document.
+type Patch struct {
+	// Op is the operation to be performed. Common operations include "add", "remove", "replace", "move", "copy", and "test".
+	Op string `json:"op"`
+
+	// Path is a JSON Pointer that indicates the location in the YAML document where the operation is to be performed.
+	Path string `json:"path"`
+
+	// Value is the value to be used within the operation. This field is required for operations like "add" and "replace".
+	Value *interface{} `json:"value"`
+}
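
As a sketch of how these types appear inside a suite file, an onUpdate entry can carry RFC 6902 style operations that temporarily alter the installed CRD schema before the initial object is created; the JSON-pointer path below is illustrative rather than taken from this repository's CRDs:

  onUpdate:
    - name: Should preserve a value allowed by an older schema
      resourceName: cluster
      initialCRDPatches:
        - op: remove
          path: /spec/versions/0/schema/openAPIV3Schema/properties/spec/properties/globalConfig/properties/logLevel/maximum
      # initial, updated, and expected then follow the same shape as in the onCreate entries.
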
diff --git a/tools/tools.go b/tools/tools.go
index 16e7fcac..c44e526d 100644
--- a/tools/tools.go
+++ b/tools/tools.go
@@ -11,6 +11,7 @@ import (
 	_ "github.com/elastic/crd-ref-docs"
 	_ "github.com/go-bindata/go-bindata/go-bindata"
 	_ "github.com/maxbrunsfeld/counterfeiter/v6"
+	_ "github.com/onsi/ginkgo/v2/ginkgo"
 	_ "github.com/openshift/build-machinery-go"
 	_ "golang.org/x/vuln/cmd/govulncheck"
 

From ffb8d54bddf50e1575720183bd29bf323fc2cc10 Mon Sep 17 00:00:00 2001
From: Bharath B 
Date: Fri, 26 Sep 2025 11:54:41 +0530
Subject: [PATCH 3/7] ESO-101: Restructures and renames ExternalSecrets API to
 ExternalSecretsConfig

Signed-off-by: Bharath B 
---
 PROJECT                                       |   2 +-
 README.md                                     |  10 +-
 api/v1alpha1/conditions.go                    |  10 +-
 api/v1alpha1/external_secrets_config_types.go | 204 ++++++++++
 .../external_secrets_manager_types.go         |  85 ++--
 api/v1alpha1/external_secrets_types.go        | 206 ----------
 api/v1alpha1/groupversion_info.go             |   2 +-
 api/v1alpha1/meta.go                          | 110 ++++-
 api/v1alpha1/zz_generated.deepcopy.go         | 303 ++++++++------
 ...ecrets-operator.clusterserviceversion.yaml |  28 +-
 ....openshift.io_externalsecretsconfigs.yaml} | 383 +++++++++++-------
 ....openshift.io_externalsecretsmanagers.yaml | 127 ++++--
 ....openshift.io_externalsecretsconfigs.yaml} | 383 +++++++++++-------
 ....openshift.io_externalsecretsmanagers.yaml | 127 ++++--
 config/crd/kustomization.yaml                 |   2 +-
 ...ecrets-operator.clusterserviceversion.yaml |  20 +-
 config/rbac/role.yaml                         |   6 +-
 config/samples/kustomization.yaml             |   2 +-
 ...rator_v1alpha1_externalsecretsconfig.yaml} |   2 +-
 docs/api_reference.md                         | 277 ++++++++-----
 pkg/controller/common/constants.go            |   6 +-
 pkg/controller/common/utils.go                |  25 +-
 pkg/controller/commontest/utils.go            |  16 +-
 pkg/controller/crd_annotator/controller.go    |  49 ++-
 .../crd_annotator/controller_test.go          | 108 ++---
 .../external_secrets/certificate.go           |  63 +--
 .../external_secrets/certificate_test.go      | 160 ++++----
 pkg/controller/external_secrets/constants.go  |   6 +-
 pkg/controller/external_secrets/controller.go |  79 ++--
 .../external_secrets/deployments.go           | 102 ++---
 .../external_secrets/deployments_test.go      | 102 ++---
 .../install_external_secrets.go               |  43 +-
 pkg/controller/external_secrets/rbacs.go      |  76 ++--
 pkg/controller/external_secrets/rbacs_test.go |  70 ++--
 pkg/controller/external_secrets/secret.go     |  16 +-
 .../external_secrets/secret_test.go           |  77 ++--
 .../external_secrets/service_test.go          |  26 +-
 .../external_secrets/serviceaccounts.go       |  14 +-
 .../external_secrets/serviceaccounts_test.go  |  48 ++-
 pkg/controller/external_secrets/services.go   |  20 +-
 pkg/controller/external_secrets/utils.go      |  85 ++--
 .../external_secrets/validatingwebhook.go     |  37 +-
 .../validatingwebhook_test.go                 |  16 +-
 .../external_secrets_manager/controller.go    |  64 +--
 .../controller_test.go                        |  42 +-
 .../externalsecretsmanager.go                 |   2 +-
 pkg/operator/setup_manager.go                 |   8 +-
 test/e2e/e2e_test.go                          |   4 +-
 test/e2e/testdata/external_secret.yaml        |   2 +-
 49 files changed, 2104 insertions(+), 1551 deletions(-)
 create mode 100644 api/v1alpha1/external_secrets_config_types.go
 delete mode 100644 api/v1alpha1/external_secrets_types.go
 rename bundle/manifests/{operator.openshift.io_externalsecrets.yaml => operator.openshift.io_externalsecretsconfigs.yaml} (87%)
 rename config/crd/bases/{operator.openshift.io_externalsecrets.yaml => operator.openshift.io_externalsecretsconfigs.yaml} (87%)
 rename config/samples/{operator_v1alpha1_externalsecrets.yaml => operator_v1alpha1_externalsecretsconfig.yaml} (81%)

diff --git a/PROJECT b/PROJECT
index b477a8e2..b2b0c3ee 100644
--- a/PROJECT
+++ b/PROJECT
@@ -16,7 +16,7 @@ resources:
   controller: true
   domain: openshift.io
   group: operator
-  kind: ExternalSecrets
+  kind: ExternalSecretsConfig
   path: github.com/openshift/external-secrets-operator/api/v1alpha1
   version: v1alpha1
 version: "3"
diff --git a/README.md b/README.md
index edb40776..43bdb20f 100644
--- a/README.md
+++ b/README.md
@@ -22,18 +22,18 @@ Using the External Secrets Operator ensures the following:
 The External Secrets Operator for Red Hat OpenShift uses the [`external-secrets`](https://github.com/openshift/external-secrets) helm charts
 to install application. The operator has three controllers to achieve the same:
 - `external_secrets_manager` controller: This is responsible for
-  * reconciling the `externalsecretsmanagers.openshift.operator.io` resource.
+  * reconciling the `externalsecretsmanagers.operator.openshift.io` resource.
   * providing the status of other controllers.
 - `external_secrets` controller: This is responsible for
-  * reconciling the `externalsecrets.openshift.operator.io` resource.
-  * installing and managing the `external-secrets` application based on the user defined configurations in `externalsecrets.openshift.operator.io` resource.
-  * reconciling the `externalsecretsmanagers.openshift.operator.io` resource for the global configurations and updates the `external-scerets` deployment accordingly.
+  * reconciling the `externalsecretsconfigs.operator.openshift.io` resource.
+  * installing and managing the `external-secrets` application based on the user-defined configurations in the `externalsecretsconfigs.operator.openshift.io` resource.
+  * reconciling the `externalsecretsmanagers.operator.openshift.io` resource for the global configurations and updating the `external-secrets` deployment accordingly.
 - `crd_annotator` controller:
   * This is responsible for adding `cert-manager.io/inject-ca-from` annotation in the `external-secrets` provided CRDs.
   * This is an optional controller, which will be activated only when [`cert-manager`](https://cert-manager.io/) is installed.
   * When `cert-manager` is installed after External Secrets Operator installation, `external-secrets-operator-controller-manager` deployment must be restarted to activate the controller.
 
-The operator automatically creates a cluster-scoped `externalsecretsmanagers.openshift.operator.io` object named `cluster`.
+The operator automatically creates a cluster-scoped `externalsecretsmanagers.operator.openshift.io` object named `cluster`.
 
 For more information about
 - `external-secrets-operator for Red Hat OpenShift`, refer to the [link](https://docs.redhat.com/en/documentation/openshift_container_platform/latest/html/security_and_compliance/external-secrets-operator-for-red-hat-openshift)
diff --git a/api/v1alpha1/conditions.go b/api/v1alpha1/conditions.go
index ddaf06f0..0624eefc 100644
--- a/api/v1alpha1/conditions.go
+++ b/api/v1alpha1/conditions.go
@@ -1,9 +1,7 @@
 package v1alpha1
 
 const (
-	// Degraded is the condition type used to inform state of the operator when
-	// it has failed with irrecoverable error like permission issues.
-	// DebugEnabled has the following options:
+	// Degraded is the condition type used to inform state of the operator when it has failed with irrecoverable error like permission issues.
 	//   Status:
 	//   - True
 	//   - False
@@ -11,8 +9,7 @@ const (
 	//   - Failed
 	Degraded string = "Degraded"
 
-	// Ready is the condition type used to inform state of readiness of the
-	// operator to process external-secrets enabling requests.
+	// Ready is the condition type used to inform state of readiness of the operator to process external-secrets enabling requests.
 	//   Status:
 	//   - True
 	//   - False
@@ -22,8 +19,7 @@ const (
 	//   - Ready: operand successfully deployed and ready
 	Ready string = "Ready"
 
-	// UpdateAnnotation is the condition type used to inform status of
-	// updating the annotations.
+	// UpdateAnnotation is the condition type used to inform status of updating the annotations.
 	//   Status:
 	//   - True
 	//   - False
diff --git a/api/v1alpha1/external_secrets_config_types.go b/api/v1alpha1/external_secrets_config_types.go
new file mode 100644
index 00000000..e8f78335
--- /dev/null
+++ b/api/v1alpha1/external_secrets_config_types.go
@@ -0,0 +1,204 @@
+package v1alpha1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func init() {
+	SchemeBuilder.Register(&ExternalSecretsConfig{}, &ExternalSecretsConfigList{})
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+
+// ExternalSecretsConfigList is a list of ExternalSecretsConfig objects.
+type ExternalSecretsConfigList struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard list's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ListMeta `json:"metadata"`
+	Items           []ExternalSecretsConfig `json:"items"`
+}
+
+// +genclient
+// +genclient:nonNamespaced
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +kubebuilder:object:root=true
+// +kubebuilder:subresource:status
+// +kubebuilder:resource:path=externalsecretsconfigs,scope=Cluster,categories={external-secrets-operator, external-secrets},shortName=esc;externalsecretsconfig;esconfig
+// +kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].status"
+// +kubebuilder:printcolumn:name="Message",type="string",JSONPath=".status.conditions[?(@.type=='Ready')].message"
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:metadata:labels={"app.kubernetes.io/name=externalsecretsconfig", "app.kubernetes.io/part-of=external-secrets-operator"}
+
+// ExternalSecretsConfig describes configuration and information about the managed external-secrets deployment.
+// The name must be `cluster` as ExternalSecretsConfig is a singleton, allowing only one instance per cluster.
+//
+// When an ExternalSecretsConfig is created, the controller installs the external-secrets and keeps it in the desired state.
+//
+// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="ExternalSecretsConfig is a singleton, .metadata.name must be 'cluster'"
+// +operator-sdk:csv:customresourcedefinitions:displayName="ExternalSecretsConfig"
+type ExternalSecretsConfig struct {
+	metav1.TypeMeta `json:",inline"`
+
+	// metadata is the standard object's metadata.
+	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	metav1.ObjectMeta `json:"metadata,omitempty"`
+
+	// spec is the specification of the desired behavior of the ExternalSecretsConfig.
+	Spec ExternalSecretsConfigSpec `json:"spec,omitempty"`
+
+	// status is the most recently observed status of the ExternalSecretsConfig.
+	Status ExternalSecretsConfigStatus `json:"status,omitempty"`
+}
+
+// ExternalSecretsConfigSpec is for configuring the external-secrets operand behavior.
+// +kubebuilder:validation:XValidation:rule="!has(self.plugins) || !has(self.plugins.bitwardenSecretManagerProvider) || !has(self.plugins.bitwardenSecretManagerProvider.mode) || self.plugins.bitwardenSecretManagerProvider.mode != 'Enabled' || has(self.plugins.bitwardenSecretManagerProvider.secretRef) || (has(self.controllerConfig) && has(self.controllerConfig.certProvider) && has(self.controllerConfig.certProvider.certManager) && has(self.controllerConfig.certProvider.certManager.mode) && self.controllerConfig.certProvider.certManager.mode == 'Enabled')",message="secretRef or certManager must be configured when bitwardenSecretManagerProvider plugin is enabled"
+type ExternalSecretsConfigSpec struct {
+	// appConfig is for specifying the configurations for the `external-secrets` operand.
+	// +kubebuilder:validation:Optional
+	ApplicationConfig ApplicationConfig `json:"appConfig,omitempty"`
+
+	// plugins is for configuring the optional provider plugins.
+	// +kubebuilder:validation:Optional
+	Plugins PluginsConfig `json:"plugins,omitempty"`
+
+	// controllerConfig is for specifying the configurations for the controller to use while installing the `external-secrets` operand and the plugins.
+	// +kubebuilder:validation:Optional
+	ControllerConfig ControllerConfig `json:"controllerConfig,omitempty"`
+}
+
+// ExternalSecretsConfigStatus is the most recently observed status of the ExternalSecretsConfig.
+type ExternalSecretsConfigStatus struct {
+	// conditions holds information of the current state of the external-secrets deployment.
+	ConditionalStatus `json:",inline,omitempty"`
+
+	// externalSecretsImage is the name of the image and the tag used for deploying external-secrets.
+	ExternalSecretsImage string `json:"externalSecretsImage,omitempty"`
+
+	// bitwardenSDKServerImage is the name of the image and the tag used for deploying bitwarden-sdk-server.
+	BitwardenSDKServerImage string `json:"bitwardenSDKServerImage,omitempty"`
+}
+
+// ApplicationConfig is for specifying the configurations for the external-secrets operand.
+type ApplicationConfig struct {
+	// operatingNamespace is for restricting the external-secrets operations to the provided namespace.
+	// When configured, `ClusterSecretStore` and `ClusterExternalSecret` are implicitly disabled.
+	// +kubebuilder:validation:MinLength:=1
+	// +kubebuilder:validation:MaxLength:=63
+	// +kubebuilder:validation:Optional
+	OperatingNamespace string `json:"operatingNamespace,omitempty"`
+
+	// webhookConfig is for configuring external-secrets webhook specifics.
+	// +kubebuilder:validation:Optional
+	WebhookConfig *WebhookConfig `json:"webhookConfig,omitempty"`
+
+	// +kubebuilder:validation:Optional
+	CommonConfigs `json:",inline,omitempty"`
+}
+
+// ControllerConfig is for specifying the configurations for the controller to use while installing the `external-secrets` operand and the plugins.
+type ControllerConfig struct {
+	// certProvider is for defining the configuration for certificate providers used to manage TLS certificates for webhook and plugins.
+	// +kubebuilder:validation:Optional
+	CertProvider *CertProvidersConfig `json:"certProvider,omitempty"`
+
+	// labels to apply to all resources created for the external-secrets operand deployment.
+	// This field can have a maximum of 20 entries.
+	// +mapType=granular
+	// +kubebuilder:validation:MinProperties:=0
+	// +kubebuilder:validation:MaxProperties:=20
+	// +kubebuilder:validation:Optional
+	Labels map[string]string `json:"labels,omitempty"`
+
+	// periodicReconcileInterval specifies the time interval in seconds for periodic reconciliation by the operator.
+	// This controls how often the operator checks resources created for external-secrets operand to ensure they remain in desired state.
+	// Interval can have value between 120-18000 seconds (2 minutes to 5 hours). Defaults to 300 seconds (5 minutes) if not specified.
+	// +kubebuilder:default:=300
+	// +kubebuilder:validation:Minimum:=120
+	// +kubebuilder:validation:Maximum:=18000
+	// +kubebuilder:validation:Optional
+	PeriodicReconcileInterval uint32 `json:"periodicReconcileInterval,omitempty"`
+}
+
+// BitwardenSecretManagerProvider is for enabling the bitwarden secrets manager provider and for setting up the additional service required for connecting with the bitwarden server.
+type BitwardenSecretManagerProvider struct {
+	// mode indicates the desired state of the bitwarden secrets manager provider plugin, which can be set to Enabled or Disabled.
+	// Enabled: Enables the Bitwarden provider plugin. The operator will ensure the plugin is deployed and its state is synchronized.
+	// Disabled: Disables reconciliation of the Bitwarden provider plugin. The plugin and its resources will remain in their current state and will not be managed by the operator.
+	// +kubebuilder:validation:Enum:=Enabled;Disabled
+	// +kubebuilder:default:=Disabled
+	// +kubebuilder:validation:Optional
+	Mode Mode `json:"mode,omitempty"`
+
+	// secretRef is the Kubernetes secret containing the TLS key pair to be used for the bitwarden server.
+	// The issuer in CertManagerConfig will be utilized to generate the required certificate if the secret reference is not provided and CertManagerConfig is configured.
+	// The key names in the secret must be `tls.crt` for the certificate, `tls.key` for the private key and `ca.crt` for the CA certificate.
+	// +kubebuilder:validation:Optional
+	SecretRef *SecretReference `json:"secretRef,omitempty"`
+}
+
+// WebhookConfig is for configuring external-secrets webhook specifics.
+type WebhookConfig struct {
+	// certificateCheckInterval is for configuring the polling interval to check the certificate validity.
+	// +kubebuilder:default:="5m"
+	// +kubebuilder:validation:Optional
+	CertificateCheckInterval *metav1.Duration `json:"certificateCheckInterval,omitempty"`
+}
+
+// CertManagerConfig is for configuring cert-manager specifics.
+// +kubebuilder:validation:XValidation:rule="self.mode != 'Enabled' || has(self.issuerRef)",message="issuerRef must be provided when mode is set to Enabled."
+// +kubebuilder:validation:XValidation:rule="has(self.injectAnnotations) && self.injectAnnotations != 'false' ? self.mode != 'Disabled' : true",message="injectAnnotations can only be set when mode is set to Enabled."
+type CertManagerConfig struct {
+	// mode indicates whether to use cert-manager for certificate management, instead of built-in cert-controller.
+	// Enabled: Makes use of cert-manager for obtaining the certificates for webhook server and other components.
+	// Disabled: Makes use of in-built cert-controller for obtaining the certificates for webhook server, which is the default behavior.
+	// This field is immutable once set.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="mode is immutable once set"
+	// +kubebuilder:validation:Enum:=Enabled;Disabled
+	// +kubebuilder:default:=Disabled
+	// +kubebuilder:validation:Required
+	Mode Mode `json:"mode,omitempty"`
+
+	// injectAnnotations is for adding the `cert-manager.io/inject-ca-from` annotation to the webhooks and CRDs to automatically set up the webhook to use the cert-manager CA. This requires CA Injector to be enabled in cert-manager.
+	// Use `true` or `false` to indicate the preference. This field is immutable once set.
+	// +kubebuilder:validation:Enum:="true";"false"
+	// +kubebuilder:default:="false"
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="injectAnnotations is immutable once set"
+	// +kubebuilder:validation:Optional
+	InjectAnnotations string `json:"injectAnnotations,omitempty"`
+
+	// issuerRef contains details of the referenced object used for obtaining certificates.
+	// When `issuerRef.Kind` is `Issuer`, it must exist in the `external-secrets` namespace.
+	// This field is immutable once set.
+	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="issuerRef is immutable once set"
+	// +kubebuilder:validation:XValidation:rule="!has(self.kind) || self.kind.lowerAscii() == 'issuer' || self.kind.lowerAscii() == 'clusterissuer'",message="kind must be either 'Issuer' or 'ClusterIssuer'"
+	// +kubebuilder:validation:XValidation:rule="!has(self.group) || self.group.lowerAscii() == 'cert-manager.io'",message="group must be 'cert-manager.io'"
+	// +kubebuilder:validation:Optional
+	IssuerRef ObjectReference `json:"issuerRef,omitempty"`
+
+	// certificateDuration is the validity period of the webhook certificate.
+	// +kubebuilder:default:="8760h"
+	// +kubebuilder:validation:Optional
+	CertificateDuration *metav1.Duration `json:"certificateDuration,omitempty"`
+
+	// certificateRenewBefore is the lead time to renew the webhook certificate before it expires.
+	// +kubebuilder:default:="30m"
+	// +kubebuilder:validation:Optional
+	CertificateRenewBefore *metav1.Duration `json:"certificateRenewBefore,omitempty"`
+}
+
+// PluginsConfig is for configuring the optional plugins.
+type PluginsConfig struct {
+	// bitwardenSecretManagerProvider is for enabling the bitwarden secrets manager provider plugin for connecting with the bitwarden secrets manager.
+	// +kubebuilder:validation:Optional
+	BitwardenSecretManagerProvider *BitwardenSecretManagerProvider `json:"bitwardenSecretManagerProvider,omitempty"`
+}
+
+// CertProvidersConfig defines the configuration for certificate providers used to manage TLS certificates for webhook and plugins.
+type CertProvidersConfig struct {
+	// certManager is for configuring cert-manager provider specifics.
+	// +kubebuilder:validation:Optional
+	CertManager *CertManagerConfig `json:"certManager,omitempty"`
+}
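
Putting the types above together, a minimal custom resource that satisfies the validation rules might look like the sketch below; the issuer name is a placeholder, and the issuerRef field names come from the shared ObjectReference type defined in meta.go rather than in this file:

  apiVersion: operator.openshift.io/v1alpha1
  kind: ExternalSecretsConfig
  metadata:
    name: cluster                     # singleton: the name must be "cluster"
  spec:
    controllerConfig:
      periodicReconcileInterval: 300  # seconds; allowed range is 120-18000
      certProvider:
        certManager:
          mode: Enabled
          issuerRef:                  # required because certManager mode is Enabled
            name: my-cluster-issuer   # placeholder
            kind: ClusterIssuer
            group: cert-manager.io
    plugins:
      bitwardenSecretManagerProvider:
        mode: Enabled                 # permitted without secretRef because cert-manager is Enabled
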
diff --git a/api/v1alpha1/external_secrets_manager_types.go b/api/v1alpha1/external_secrets_manager_types.go
index 4cc61f2c..8c036ed3 100644
--- a/api/v1alpha1/external_secrets_manager_types.go
+++ b/api/v1alpha1/external_secrets_manager_types.go
@@ -1,7 +1,6 @@
 package v1alpha1
 
 import (
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -10,7 +9,7 @@ func init() {
 }
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-//+kubebuilder:object:root=true
+// +kubebuilder:object:root=true
 
 // ExternalSecretsManagerList is a list of ExternalSecretsManager objects.
 type ExternalSecretsManagerList struct {
@@ -23,18 +22,19 @@ type ExternalSecretsManagerList struct {
 }
 
 // +genclient
+// +genclient:nonNamespaced
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster
+// +kubebuilder:resource:path=externalsecretsmanagers,scope=Cluster,categories={external-secrets-operator, external-secrets},shortName=esm;externalsecretsmanager;esmanager
+// +kubebuilder:printcolumn:name="AGE",type="date",JSONPath=".metadata.creationTimestamp"
+// +kubebuilder:metadata:labels={"app.kubernetes.io/name=externalsecretsmanager", "app.kubernetes.io/part-of=external-secrets-operator"}
 
-// ExternalSecretsManager describes configuration and information about the deployments managed by
-// the external-secrets-operator. The name must be `cluster` as this is a singleton object allowing
-// only one instance of ExternalSecretsManager per cluster.
+// ExternalSecretsManager describes configuration and information about the deployments managed by the external-secrets-operator.
+// The name must be `cluster` as this is a singleton object allowing only one instance of ExternalSecretsManager per cluster.
 //
-// It is mainly for configuring the global options and enabling optional features, which
-// serves as a common/centralized config for managing multiple controllers of the operator. The object
-// is automatically created during the operator installation.
+// It is mainly for configuring the global options and enabling optional features, which serves as a common/centralized config for managing multiple controllers of the operator.
+// The object is automatically created during the operator installation.
 //
 // +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="ExternalSecretsManager is a singleton, .metadata.name must be 'cluster'"
 // +operator-sdk:csv:customresourcedefinitions:displayName="ExternalSecretsManager"
@@ -48,77 +48,58 @@ type ExternalSecretsManager struct {
 	// spec is the specification of the desired behavior
 	Spec ExternalSecretsManagerSpec `json:"spec,omitempty"`
 
-	// status is the most recently observed status of controllers used by
-	// External Secrets Operator.
+	// status is the most recently observed status of controllers used by External Secrets Operator.
 	Status ExternalSecretsManagerStatus `json:"status,omitempty"`
 }
 
 // ExternalSecretsManagerSpec is the specification of the desired behavior of the ExternalSecretsManager.
 type ExternalSecretsManagerSpec struct {
-	// globalConfig is for configuring the behavior of deployments that are managed
-	// by external secrets-operator.
+	// globalConfig is for configuring the behavior of deployments that are managed by external secrets-operator.
 	// +kubebuilder:validation:Optional
 	GlobalConfig *GlobalConfig `json:"globalConfig,omitempty"`
 
-	// features is for enabling the optional operator features.
+	// optionalFeatures is for enabling the optional operator features.
+	// +patchMergeKey=name
+	// +patchStrategy=merge
+	// +listType=map
+	// +listMapKey=name
 	// +kubebuilder:validation:Optional
-	Features []Feature `json:"features,omitempty"`
+	OptionalFeatures []Feature `json:"optionalFeatures,omitempty"`
 }
 
 // GlobalConfig is for configuring the external-secrets-operator behavior.
 type GlobalConfig struct {
-	// logLevel supports value range as per [kubernetes logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
-	// +kubebuilder:default:=1
-	// +kubebuilder:validation:Minimum:=1
-	// +kubebuilder:validation:Maximum:=5
-	// +kubebuilder:validation:Optional
-	LogLevel int32 `json:"logLevel,omitempty"`
-
-	// resources is for defining the resource requirements.
-	// Cannot be updated.
-	// ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-	// +kubebuilder:validation:Optional
-	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
-
-	// affinity is for setting scheduling affinity rules.
-	// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-	// +kubebuilder:validation:Optional
-	Affinity *corev1.Affinity `json:"affinity,omitempty"`
-
-	// tolerations is for setting the pod tolerations.
-	// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
-	// +kubebuilder:validation:Optional
-	// +listType=atomic
-	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-
-	// nodeSelector is for defining the scheduling criteria using node labels.
-	// ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-	// +kubebuilder:validation:Optional
-	// +mapType=atomic
-	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-
-	// labels to apply to all resources created for external-secrets deployment.
+	// labels to apply to all resources created by the operator.
+	// This field can have a maximum of 20 entries.
 	// +mapType=granular
+	// +kubebuilder:validation:MinProperties:=0
+	// +kubebuilder:validation:MaxProperties:=20
 	// +kubebuilder:validation:Optional
 	Labels map[string]string `json:"labels,omitempty"`
+
+	CommonConfigs `json:",inline,omitempty"`
 }
 
-// Feature is for enabling the optional features.
 // Feature is for enabling the optional features.
 type Feature struct {
-	// name of the optional feature.
+	// name of the optional feature. There are no optional features currently supported.
+	// +kubebuilder:validation:Enum:=""
 	// +kubebuilder:validation:Required
 	Name string `json:"name"`
 
-	// enabled determines if feature should be turned on.
+	// mode indicates the feature state.
+	// Use Enabled or Disabled to indicate the preference.
+	// Enabled: Enables the optional feature and creates resources if required.
+	// Disabled: Disables the optional feature, but will not remove any resources created.
+	// +kubebuilder:validation:Enum:=Enabled;Disabled
 	// +kubebuilder:validation:Required
-	Enabled bool `json:"enabled"`
+	Mode Mode `json:"mode"`
 }
 
 // ExternalSecretsManagerStatus is the most recently observed status of the ExternalSecretsManager.
 type ExternalSecretsManagerStatus struct {
 	// controllerStatuses holds the observed conditions of the controllers part of the operator.
-	// +patchMergeKey=type
+	// +patchMergeKey=name
 	// +patchStrategy=merge
 	// +listType=map
 	// +listMapKey=name
@@ -134,7 +115,7 @@ type ExternalSecretsManagerStatus struct {
 type ControllerStatus struct {
 	// name of the controller for which the observed condition is recorded.
 	// +kubebuilder:validation:Required
-	Name string `json:"name,omitempty"`
+	Name string `json:"name"`
 
 	// conditions holds information of the current state of the external-secrets-operator controllers.
 	// +patchMergeKey=type
diff --git a/api/v1alpha1/external_secrets_types.go b/api/v1alpha1/external_secrets_types.go
deleted file mode 100644
index a8cce44a..00000000
--- a/api/v1alpha1/external_secrets_types.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package v1alpha1
-
-import (
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func init() {
-	SchemeBuilder.Register(&ExternalSecrets{}, &ExternalSecretsList{})
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-//+kubebuilder:object:root=true
-
-// ExternalSecretsList is a list of ExternalSecrets objects.
-type ExternalSecretsList struct {
-	metav1.TypeMeta `json:",inline"`
-
-	// metadata is the standard list's metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-	metav1.ListMeta `json:"metadata"`
-	Items           []ExternalSecrets `json:"items"`
-}
-
-// +genclient
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +kubebuilder:object:root=true
-// +kubebuilder:subresource:status
-// +kubebuilder:resource:scope=Cluster
-
-// ExternalSecrets describes configuration and information about the managed external-secrets
-// deployment. The name must be `cluster` as ExternalSecrets is a singleton,
-// allowing only one instance per cluster.
-//
-// When an ExternalSecrets is created, a new deployment is created which manages the
-// external-secrets and keeps it in the desired state.
-//
-// +kubebuilder:validation:XValidation:rule="self.metadata.name == 'cluster'",message="ExternalSecrets is a singleton, .metadata.name must be 'cluster'"
-// +operator-sdk:csv:customresourcedefinitions:displayName="ExternalSecrets"
-type ExternalSecrets struct {
-	metav1.TypeMeta `json:",inline"`
-
-	// metadata is the standard object's metadata.
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-
-	// spec is the specification of the desired behavior of the ExternalSecrets.
-	Spec ExternalSecretsSpec `json:"spec,omitempty"`
-
-	// status is the most recently observed status of the ExternalSecrets.
-	Status ExternalSecretsStatus `json:"status,omitempty"`
-}
-
-// ExternalSecretsSpec is the specification of the desired behavior of the ExternalSecrets.
-type ExternalSecretsSpec struct {
-	// externalSecretsConfig is for configuring the external-secrets behavior.
-	// +kubebuilder:validation:Optional
-	ExternalSecretsConfig *ExternalSecretsConfig `json:"externalSecretsConfig,omitempty"`
-
-	// controllerConfig is for configuring the controller for setting up
-	// defaults to enable external-secrets.
-	// +kubebuilder:validation:Optional
-	ControllerConfig *ControllerConfig `json:"controllerConfig,omitempty"`
-}
-
-// ExternalSecretsStatus is the most recently observed status of the ExternalSecrets.
-type ExternalSecretsStatus struct {
-	// conditions holds information of the current state of the external-secrets deployment.
-	ConditionalStatus `json:",inline,omitempty"`
-
-	// externalSecretsImage is the name of the image and the tag used for deploying external-secrets.
-	ExternalSecretsImage string `json:"externalSecretsImage,omitempty"`
-}
-
-// ExternalSecretsConfig is for configuring the external-secrets behavior.
-type ExternalSecretsConfig struct {
-	// logLevel supports value range as per [kubernetes logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
-	// +kubebuilder:default:=1
-	// +kubebuilder:validation:Minimum:=1
-	// +kubebuilder:validation:Maximum:=5
-	// +kubebuilder:validation:Optional
-	LogLevel int32 `json:"logLevel,omitempty"`
-
-	// operatingNamespace is for restricting the external-secrets operations to provided namespace.
-	// And when enabled `ClusterSecretStore` and `ClusterExternalSecret` are implicitly disabled.
-	// +kubebuilder:validation:Optional
-	OperatingNamespace string `json:"operatingNamespace,omitempty"`
-
-	// bitwardenSecretManagerProvider is for enabling the bitwarden secrets manager provider and
-	// for setting up the additional service required for connecting with the bitwarden server.
-	// +kubebuilder:validation:Optional
-	BitwardenSecretManagerProvider *BitwardenSecretManagerProvider `json:"bitwardenSecretManagerProvider,omitempty"`
-
-	// webhookConfig is for configuring external-secrets webhook specifics.
-	WebhookConfig *WebhookConfig `json:"webhookConfig,omitempty"`
-
-	// CertManagerConfig is for configuring cert-manager specifics, which will be used for generating
-	// certificates for webhook and bitwarden-sdk-server components.
-	// +kubebuilder:validation:Optional
-	CertManagerConfig *CertManagerConfig `json:"certManagerConfig,omitempty"`
-
-	// resources is for defining the resource requirements.
-	// Cannot be updated.
-	// ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
-	// +kubebuilder:validation:Optional
-	Resources corev1.ResourceRequirements `json:"resources,omitempty"`
-
-	// affinity is for setting scheduling affinity rules.
-	// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-	// +kubebuilder:validation:Optional
-	Affinity *corev1.Affinity `json:"affinity,omitempty"`
-
-	// tolerations is for setting the pod tolerations.
-	// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
-	// +kubebuilder:validation:Optional
-	// +listType=atomic
-	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-
-	// nodeSelector is for defining the scheduling criteria using node labels.
-	// ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-	// +kubebuilder:validation:Optional
-	// +mapType=atomic
-	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
-}
-
-// ControllerConfig is for configuring the operator for setting up
-// defaults to install external-secrets.
-// +kubebuilder:validation:XValidation:rule="!has(oldSelf.namespace) && !has(self.namespace) || has(oldSelf.namespace) && has(self.namespace)",message="namespace may only be configured during creation"
-type ControllerConfig struct {
-	// namespace is for configuring the namespace to install the external-secret operand.
-	// +kubebuilder:validation:Optional
-	// +kubebuilder:default:="external-secrets"
-	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="namespace is immutable once set"
-	Namespace string `json:"namespace,omitempty"`
-
-	// labels to apply to all resources created for external-secrets deployment.
-	// +mapType=granular
-	// +kubebuilder:validation:Optional
-	Labels map[string]string `json:"labels,omitempty"`
-}
-
-// BitwardenSecretManagerProvider is for enabling the bitwarden secrets manager provider and
-// for setting up the additional service required for connecting with the bitwarden server.
-type BitwardenSecretManagerProvider struct {
-	// enabled is for enabling the bitwarden secrets manager provider, which can be indicated
-	// by setting `true` or `false`.
-	// +kubebuilder:default:="false"
-	// +kubebuilder:validation:Enum:="true";"false"
-	// +kubebuilder:validation:Optional
-	Enabled string `json:"enabled,omitempty"`
-
-	// SecretRef is the kubernetes secret containing the TLS key pair to be used for the bitwarden server.
-	// The issuer in CertManagerConfig will be utilized to generate the required certificate if the secret
-	// reference is not provided and CertManagerConfig is configured. The key names in secret for certificate
-	// must be `tls.crt`, for private key must be `tls.key` and for CA certificate key name must be `ca.crt`.
-	// +kubebuilder:validation:Optional
-	SecretRef SecretReference `json:"secretRef,omitempty"`
-}
-
-// WebhookConfig is for configuring external-secrets webhook specifics.
-type WebhookConfig struct {
-	// CertificateCheckInterval is for configuring the polling interval to check the certificate
-	// validity.
-	// +kubebuilder:default:="5m"
-	// +kubebuilder:validation:Optional
-	CertificateCheckInterval metav1.Duration `json:"certificateCheckInterval,omitempty"`
-}
-
-// CertManagerConfig is for configuring cert-manager specifics.
-// +kubebuilder:validation:XValidation:rule="has(self.addInjectorAnnotations) && self.addInjectorAnnotations != 'false' ? self.enabled != 'false' : true",message="certManagerConfig must have enabled set, to set addInjectorAnnotations"
-type CertManagerConfig struct {
-	// enabled is for enabling the use of cert-manager for obtaining and renewing the
-	// certificates used for webhook server, instead of built-in certificates.
-	// Use `true` or `false` to indicate the preference.
-	// +kubebuilder:default:="false"
-	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="enabled is immutable once set"
-	// +kubebuilder:validation:Enum:="true";"false"
-	// +kubebuilder:validation:Required
-	Enabled string `json:"enabled,omitempty"`
-
-	// addInjectorAnnotations is for adding the `cert-manager.io/inject-ca-from` annotation to the
-	// webhooks and CRDs to automatically setup webhook to the cert-manager CA. This requires
-	// CA Injector to be enabled in cert-manager. Use `true` or `false` to indicate the preference.
-	// +kubebuilder:default:="false"
-	// +kubebuilder:validation:Enum:="true";"false"
-	// +kubebuilder:validation:Optional
-	AddInjectorAnnotations string `json:"addInjectorAnnotations,omitempty"`
-
-	// issuerRef contains details to the referenced object used for
-	// obtaining the certificates. It must exist in the external-secrets
-	// namespace if not using a cluster-scoped cert-manager issuer.
-	// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="issuerRef is immutable once set"
-	// +kubebuilder:validation:Required
-	IssuerRef ObjectReference `json:"issuerRef,omitempty"`
-
-	// certificateDuration is the validity period of the webhook certificate.
-	// +kubebuilder:default:="8760h"
-	// +kubebuilder:validation:Optional
-	CertificateDuration *metav1.Duration `json:"certificateDuration,omitempty"`
-
-	// certificateRenewBefore is the ahead time to renew the webhook certificate
-	// before expiry.
-	// +kubebuilder:default:="30m"
-	// +kubebuilder:validation:Optional
-	CertificateRenewBefore *metav1.Duration `json:"certificateRenewBefore,omitempty"`
-}
diff --git a/api/v1alpha1/groupversion_info.go b/api/v1alpha1/groupversion_info.go
index 02bae444..4ed04fdc 100644
--- a/api/v1alpha1/groupversion_info.go
+++ b/api/v1alpha1/groupversion_info.go
@@ -35,8 +35,8 @@ var (
 	AddToScheme = SchemeBuilder.AddToScheme
 )
 
+// Resource takes an unqualified resource and returns a Group qualified GroupResource.
 // Manually added to conform to k8s code-generator lister-gen.
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
 func Resource(resource string) schema.GroupResource {
 	return GroupVersion.WithResource(resource).GroupResource()
 }
diff --git a/api/v1alpha1/meta.go b/api/v1alpha1/meta.go
index cde0b70c..278e38ad 100644
--- a/api/v1alpha1/meta.go
+++ b/api/v1alpha1/meta.go
@@ -1,9 +1,11 @@
 package v1alpha1
 
 import (
+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
+// ConditionalStatus holds information of the current state of the external-secrets deployment indicated through defined conditions.
 type ConditionalStatus struct {
 	// conditions holds information of the current state of deployment.
 	// +patchMergeKey=type
@@ -16,20 +18,124 @@ type ConditionalStatus struct {
 // ObjectReference is a reference to an object with a given name, kind and group.
 type ObjectReference struct {
 	// Name of the resource being referred to.
+	// +kubebuilder:validation:MinLength:=1
+	// +kubebuilder:validation:MaxLength:=253
 	// +kubebuilder:validation:Required
 	Name string `json:"name"`
+
 	// Kind of the resource being referred to.
+	// +kubebuilder:validation:MinLength:=1
+	// +kubebuilder:validation:MaxLength:=253
 	// +kubebuilder:validation:Optional
 	Kind string `json:"kind,omitempty"`
+
 	// Group of the resource being referred to.
+	// +kubebuilder:validation:MinLength:=1
+	// +kubebuilder:validation:MaxLength:=253
 	// +kubebuilder:validation:Optional
 	Group string `json:"group,omitempty"`
 }
 
-// SecretReference is a reference to the secret with the given name, which should exist
-// in the same namespace where it will be utilized.
+// SecretReference is a reference to the secret with the given name, which should exist in the same namespace where it will be utilized.
 type SecretReference struct {
 	// Name of the secret resource being referred to.
+	// +kubebuilder:validation:MinLength:=1
+	// +kubebuilder:validation:MaxLength:=253
 	// +kubebuilder:validation:Required
 	Name string `json:"name"`
 }
+
+// CommonConfigs are the common configurations available for all the operands managed by the operator.
+type CommonConfigs struct {
+	// logLevel supports value range as per [Kubernetes logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
+	// +kubebuilder:default:=1
+	// +kubebuilder:validation:Minimum:=1
+	// +kubebuilder:validation:Maximum:=5
+	// +kubebuilder:validation:Optional
+	LogLevel int32 `json:"logLevel,omitempty"`
+
+	// resources is for defining the resource requirements.
+	// Cannot be updated.
+	// ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
+	// +kubebuilder:validation:Optional
+	Resources *corev1.ResourceRequirements `json:"resources,omitempty"`
+
+	// affinity is for setting scheduling affinity rules.
+	// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
+	// +kubebuilder:validation:Optional
+	Affinity *corev1.Affinity `json:"affinity,omitempty"`
+
+	// tolerations is for setting the pod tolerations.
+	// ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+	// This field can have a maximum of 50 entries.
+	// +listType=atomic
+	// +kubebuilder:validation:MinItems:=0
+	// +kubebuilder:validation:MaxItems:=50
+	// +kubebuilder:validation:Optional
+	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
+
+	// nodeSelector is for defining the scheduling criteria using node labels.
+	// ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+	// This field can have a maximum of 50 entries.
+	// +mapType=atomic
+	// +kubebuilder:validation:MinProperties:=0
+	// +kubebuilder:validation:MaxProperties:=50
+	// +kubebuilder:validation:Optional
+	NodeSelector map[string]string `json:"nodeSelector,omitempty"`
+
+	// proxy is for setting the proxy configurations, which will be made available as environment variables in the operand containers managed by the operator.
+	// +kubebuilder:validation:Optional
+	Proxy *ProxyConfig `json:"proxy,omitempty"`
+}
+
+// ProxyConfig is for setting the proxy configurations, which will be made available as environment variables in the operand containers managed by the operator.
+type ProxyConfig struct {
+	// httpProxy is the URL of the proxy for HTTP requests.
+	// This field can have a maximum of 2048 characters.
+	// +kubebuilder:validation:MinLength:=0
+	// +kubebuilder:validation:MaxLength:=2048
+	// +kubebuilder:validation:Optional
+	HTTPProxy string `json:"httpProxy,omitempty"`
+
+	// httpsProxy is the URL of the proxy for HTTPS requests.
+	// This field can have a maximum of 2048 characters.
+	// +kubebuilder:validation:MinLength:=0
+	// +kubebuilder:validation:MaxLength:=2048
+	// +kubebuilder:validation:Optional
+	HTTPSProxy string `json:"httpsProxy,omitempty"`
+
+	// noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used.
+	// This field can have a maximum of 4096 characters.
+	// +kubebuilder:validation:MinLength:=0
+	// +kubebuilder:validation:MaxLength:=4096
+	// +kubebuilder:validation:Optional
+	NoProxy string `json:"noProxy,omitempty"`
+}
+
+// Mode indicates the operational state of the optional features.
+type Mode string
+
+const (
+	// Enabled indicates the optional configuration is enabled.
+	Enabled Mode = "Enabled"
+
+	// Disabled indicates the optional configuration is disabled.
+	Disabled Mode = "Disabled"
+
+	// DisabledAndCleanup indicates the optional configuration is disabled and created resources are automatically removed.
+	DisabledAndCleanup Mode = "DisabledAndCleanup"
+)
+
+// PurgePolicy defines the policy for purging default resources.
+type PurgePolicy string
+
+const (
+	// PurgeAll indicates to purge all the created resources.
+	PurgeAll PurgePolicy = "PurgeAll"
+
+	// PurgeNone indicates to purge none of the created resources.
+	PurgeNone PurgePolicy = "PurgeNone"
+
+	// PurgeExceptSecrets indicates to purge all the created resources except the Secret resource.
+	PurgeExceptSecrets PurgePolicy = "PurgeExceptSecrets"
+)
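
CommonConfigs is inlined both into GlobalConfig above and, per the ExternalSecretsConfig CRD schema later in this patch, into its appConfig section. Complementing the certProvider sketch earlier, the following illustrates those inlined fields on an ExternalSecretsConfig object; the resource values, taint key, and node label are placeholders only.

```yaml
apiVersion: operator.openshift.io/v1alpha1
kind: ExternalSecretsConfig
metadata:
  name: cluster
spec:
  appConfig:
    logLevel: 3
    resources:                       # standard corev1.ResourceRequirements
      limits:
        cpu: 100m                    # placeholder values
        memory: 256Mi
    tolerations:                     # at most 50 entries
    - key: example.com/dedicated     # placeholder taint key
      operator: Exists
      effect: NoSchedule
    nodeSelector:                    # at most 50 entries
      example.com/pool: secrets      # placeholder node label
    webhookConfig:
      certificateCheckInterval: 5m   # polling interval for certificate validity checks
```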
diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go
index f4d482c4..223ccb66 100644
--- a/api/v1alpha1/zz_generated.deepcopy.go
+++ b/api/v1alpha1/zz_generated.deepcopy.go
@@ -21,15 +21,40 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApplicationConfig) DeepCopyInto(out *ApplicationConfig) {
+	*out = *in
+	if in.WebhookConfig != nil {
+		in, out := &in.WebhookConfig, &out.WebhookConfig
+		*out = new(WebhookConfig)
+		(*in).DeepCopyInto(*out)
+	}
+	in.CommonConfigs.DeepCopyInto(&out.CommonConfigs)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApplicationConfig.
+func (in *ApplicationConfig) DeepCopy() *ApplicationConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(ApplicationConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *BitwardenSecretManagerProvider) DeepCopyInto(out *BitwardenSecretManagerProvider) {
 	*out = *in
-	out.SecretRef = in.SecretRef
+	if in.SecretRef != nil {
+		in, out := &in.SecretRef, &out.SecretRef
+		*out = new(SecretReference)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BitwardenSecretManagerProvider.
@@ -48,12 +73,12 @@ func (in *CertManagerConfig) DeepCopyInto(out *CertManagerConfig) {
 	out.IssuerRef = in.IssuerRef
 	if in.CertificateDuration != nil {
 		in, out := &in.CertificateDuration, &out.CertificateDuration
-		*out = new(metav1.Duration)
+		*out = new(v1.Duration)
 		**out = **in
 	}
 	if in.CertificateRenewBefore != nil {
 		in, out := &in.CertificateRenewBefore, &out.CertificateRenewBefore
-		*out = new(metav1.Duration)
+		*out = new(v1.Duration)
 		**out = **in
 	}
 }
@@ -68,6 +93,70 @@ func (in *CertManagerConfig) DeepCopy() *CertManagerConfig {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CertProvidersConfig) DeepCopyInto(out *CertProvidersConfig) {
+	*out = *in
+	if in.CertManager != nil {
+		in, out := &in.CertManager, &out.CertManager
+		*out = new(CertManagerConfig)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertProvidersConfig.
+func (in *CertProvidersConfig) DeepCopy() *CertProvidersConfig {
+	if in == nil {
+		return nil
+	}
+	out := new(CertProvidersConfig)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CommonConfigs) DeepCopyInto(out *CommonConfigs) {
+	*out = *in
+	if in.Resources != nil {
+		in, out := &in.Resources, &out.Resources
+		*out = new(corev1.ResourceRequirements)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Affinity != nil {
+		in, out := &in.Affinity, &out.Affinity
+		*out = new(corev1.Affinity)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.Tolerations != nil {
+		in, out := &in.Tolerations, &out.Tolerations
+		*out = make([]corev1.Toleration, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	if in.NodeSelector != nil {
+		in, out := &in.NodeSelector, &out.NodeSelector
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	if in.Proxy != nil {
+		in, out := &in.Proxy, &out.Proxy
+		*out = new(ProxyConfig)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonConfigs.
+func (in *CommonConfigs) DeepCopy() *CommonConfigs {
+	if in == nil {
+		return nil
+	}
+	out := new(CommonConfigs)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Condition) DeepCopyInto(out *Condition) {
 	*out = *in
@@ -88,7 +177,7 @@ func (in *ConditionalStatus) DeepCopyInto(out *ConditionalStatus) {
 	*out = *in
 	if in.Conditions != nil {
 		in, out := &in.Conditions, &out.Conditions
-		*out = make([]metav1.Condition, len(*in))
+		*out = make([]v1.Condition, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -108,6 +197,11 @@ func (in *ConditionalStatus) DeepCopy() *ConditionalStatus {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ControllerConfig) DeepCopyInto(out *ControllerConfig) {
 	*out = *in
+	if in.CertProvider != nil {
+		in, out := &in.CertProvider, &out.CertProvider
+		*out = new(CertProvidersConfig)
+		(*in).DeepCopyInto(*out)
+	}
 	if in.Labels != nil {
 		in, out := &in.Labels, &out.Labels
 		*out = make(map[string]string, len(*in))
@@ -148,7 +242,7 @@ func (in *ControllerStatus) DeepCopy() *ControllerStatus {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExternalSecrets) DeepCopyInto(out *ExternalSecrets) {
+func (in *ExternalSecretsConfig) DeepCopyInto(out *ExternalSecretsConfig) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
@@ -156,18 +250,18 @@ func (in *ExternalSecrets) DeepCopyInto(out *ExternalSecrets) {
 	in.Status.DeepCopyInto(&out.Status)
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSecrets.
-func (in *ExternalSecrets) DeepCopy() *ExternalSecrets {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSecretsConfig.
+func (in *ExternalSecretsConfig) DeepCopy() *ExternalSecretsConfig {
 	if in == nil {
 		return nil
 	}
-	out := new(ExternalSecrets)
+	out := new(ExternalSecretsConfig)
 	in.DeepCopyInto(out)
 	return out
 }
 
 // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ExternalSecrets) DeepCopyObject() runtime.Object {
+func (in *ExternalSecretsConfig) DeepCopyObject() runtime.Object {
 	if c := in.DeepCopy(); c != nil {
 		return c
 	}
@@ -175,85 +269,69 @@ func (in *ExternalSecrets) DeepCopyObject() runtime.Object {
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExternalSecretsConfig) DeepCopyInto(out *ExternalSecretsConfig) {
+func (in *ExternalSecretsConfigList) DeepCopyInto(out *ExternalSecretsConfigList) {
 	*out = *in
-	if in.BitwardenSecretManagerProvider != nil {
-		in, out := &in.BitwardenSecretManagerProvider, &out.BitwardenSecretManagerProvider
-		*out = new(BitwardenSecretManagerProvider)
-		**out = **in
-	}
-	if in.WebhookConfig != nil {
-		in, out := &in.WebhookConfig, &out.WebhookConfig
-		*out = new(WebhookConfig)
-		**out = **in
-	}
-	if in.CertManagerConfig != nil {
-		in, out := &in.CertManagerConfig, &out.CertManagerConfig
-		*out = new(CertManagerConfig)
-		(*in).DeepCopyInto(*out)
-	}
-	in.Resources.DeepCopyInto(&out.Resources)
-	if in.Affinity != nil {
-		in, out := &in.Affinity, &out.Affinity
-		*out = new(v1.Affinity)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Tolerations != nil {
-		in, out := &in.Tolerations, &out.Tolerations
-		*out = make([]v1.Toleration, len(*in))
+	out.TypeMeta = in.TypeMeta
+	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	if in.Items != nil {
+		in, out := &in.Items, &out.Items
+		*out = make([]ExternalSecretsConfig, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	if in.NodeSelector != nil {
-		in, out := &in.NodeSelector, &out.NodeSelector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSecretsConfig.
-func (in *ExternalSecretsConfig) DeepCopy() *ExternalSecretsConfig {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSecretsConfigList.
+func (in *ExternalSecretsConfigList) DeepCopy() *ExternalSecretsConfigList {
 	if in == nil {
 		return nil
 	}
-	out := new(ExternalSecretsConfig)
+	out := new(ExternalSecretsConfigList)
 	in.DeepCopyInto(out)
 	return out
 }
 
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ExternalSecretsConfigList) DeepCopyObject() runtime.Object {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExternalSecretsList) DeepCopyInto(out *ExternalSecretsList) {
+func (in *ExternalSecretsConfigSpec) DeepCopyInto(out *ExternalSecretsConfigSpec) {
 	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]ExternalSecrets, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
+	in.ApplicationConfig.DeepCopyInto(&out.ApplicationConfig)
+	in.Plugins.DeepCopyInto(&out.Plugins)
+	in.ControllerConfig.DeepCopyInto(&out.ControllerConfig)
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSecretsList.
-func (in *ExternalSecretsList) DeepCopy() *ExternalSecretsList {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSecretsConfigSpec.
+func (in *ExternalSecretsConfigSpec) DeepCopy() *ExternalSecretsConfigSpec {
 	if in == nil {
 		return nil
 	}
-	out := new(ExternalSecretsList)
+	out := new(ExternalSecretsConfigSpec)
 	in.DeepCopyInto(out)
 	return out
 }
 
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *ExternalSecretsList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ExternalSecretsConfigStatus) DeepCopyInto(out *ExternalSecretsConfigStatus) {
+	*out = *in
+	in.ConditionalStatus.DeepCopyInto(&out.ConditionalStatus)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSecretsConfigStatus.
+func (in *ExternalSecretsConfigStatus) DeepCopy() *ExternalSecretsConfigStatus {
+	if in == nil {
+		return nil
 	}
-	return nil
+	out := new(ExternalSecretsConfigStatus)
+	in.DeepCopyInto(out)
+	return out
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -323,8 +401,8 @@ func (in *ExternalSecretsManagerSpec) DeepCopyInto(out *ExternalSecretsManagerSp
 		*out = new(GlobalConfig)
 		(*in).DeepCopyInto(*out)
 	}
-	if in.Features != nil {
-		in, out := &in.Features, &out.Features
+	if in.OptionalFeatures != nil {
+		in, out := &in.OptionalFeatures, &out.OptionalFeatures
 		*out = make([]Feature, len(*in))
 		copy(*out, *in)
 	}
@@ -364,114 +442,89 @@ func (in *ExternalSecretsManagerStatus) DeepCopy() *ExternalSecretsManagerStatus
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExternalSecretsSpec) DeepCopyInto(out *ExternalSecretsSpec) {
+func (in *Feature) DeepCopyInto(out *Feature) {
 	*out = *in
-	if in.ExternalSecretsConfig != nil {
-		in, out := &in.ExternalSecretsConfig, &out.ExternalSecretsConfig
-		*out = new(ExternalSecretsConfig)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.ControllerConfig != nil {
-		in, out := &in.ControllerConfig, &out.ControllerConfig
-		*out = new(ControllerConfig)
-		(*in).DeepCopyInto(*out)
-	}
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSecretsSpec.
-func (in *ExternalSecretsSpec) DeepCopy() *ExternalSecretsSpec {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Feature.
+func (in *Feature) DeepCopy() *Feature {
 	if in == nil {
 		return nil
 	}
-	out := new(ExternalSecretsSpec)
+	out := new(Feature)
 	in.DeepCopyInto(out)
 	return out
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExternalSecretsStatus) DeepCopyInto(out *ExternalSecretsStatus) {
+func (in *GlobalConfig) DeepCopyInto(out *GlobalConfig) {
 	*out = *in
-	in.ConditionalStatus.DeepCopyInto(&out.ConditionalStatus)
+	if in.Labels != nil {
+		in, out := &in.Labels, &out.Labels
+		*out = make(map[string]string, len(*in))
+		for key, val := range *in {
+			(*out)[key] = val
+		}
+	}
+	in.CommonConfigs.DeepCopyInto(&out.CommonConfigs)
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSecretsStatus.
-func (in *ExternalSecretsStatus) DeepCopy() *ExternalSecretsStatus {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalConfig.
+func (in *GlobalConfig) DeepCopy() *GlobalConfig {
 	if in == nil {
 		return nil
 	}
-	out := new(ExternalSecretsStatus)
+	out := new(GlobalConfig)
 	in.DeepCopyInto(out)
 	return out
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Feature) DeepCopyInto(out *Feature) {
+func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
 	*out = *in
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Feature.
-func (in *Feature) DeepCopy() *Feature {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
+func (in *ObjectReference) DeepCopy() *ObjectReference {
 	if in == nil {
 		return nil
 	}
-	out := new(Feature)
+	out := new(ObjectReference)
 	in.DeepCopyInto(out)
 	return out
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *GlobalConfig) DeepCopyInto(out *GlobalConfig) {
+func (in *PluginsConfig) DeepCopyInto(out *PluginsConfig) {
 	*out = *in
-	in.Resources.DeepCopyInto(&out.Resources)
-	if in.Affinity != nil {
-		in, out := &in.Affinity, &out.Affinity
-		*out = new(v1.Affinity)
+	if in.BitwardenSecretManagerProvider != nil {
+		in, out := &in.BitwardenSecretManagerProvider, &out.BitwardenSecretManagerProvider
+		*out = new(BitwardenSecretManagerProvider)
 		(*in).DeepCopyInto(*out)
 	}
-	if in.Tolerations != nil {
-		in, out := &in.Tolerations, &out.Tolerations
-		*out = make([]v1.Toleration, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.NodeSelector != nil {
-		in, out := &in.NodeSelector, &out.NodeSelector
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
-	if in.Labels != nil {
-		in, out := &in.Labels, &out.Labels
-		*out = make(map[string]string, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val
-		}
-	}
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlobalConfig.
-func (in *GlobalConfig) DeepCopy() *GlobalConfig {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginsConfig.
+func (in *PluginsConfig) DeepCopy() *PluginsConfig {
 	if in == nil {
 		return nil
 	}
-	out := new(GlobalConfig)
+	out := new(PluginsConfig)
 	in.DeepCopyInto(out)
 	return out
 }
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectReference) DeepCopyInto(out *ObjectReference) {
+func (in *ProxyConfig) DeepCopyInto(out *ProxyConfig) {
 	*out = *in
 }
 
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference.
-func (in *ObjectReference) DeepCopy() *ObjectReference {
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProxyConfig.
+func (in *ProxyConfig) DeepCopy() *ProxyConfig {
 	if in == nil {
 		return nil
 	}
-	out := new(ObjectReference)
+	out := new(ProxyConfig)
 	in.DeepCopyInto(out)
 	return out
 }
@@ -494,7 +547,11 @@ func (in *SecretReference) DeepCopy() *SecretReference {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *WebhookConfig) DeepCopyInto(out *WebhookConfig) {
 	*out = *in
-	out.CertificateCheckInterval = in.CertificateCheckInterval
+	if in.CertificateCheckInterval != nil {
+		in, out := &in.CertificateCheckInterval, &out.CertificateCheckInterval
+		*out = new(v1.Duration)
+		**out = **in
+	}
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookConfig.
diff --git a/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml b/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml
index 5fe651bb..871cc306 100644
--- a/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml
+++ b/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml
@@ -195,7 +195,7 @@ metadata:
         },
         {
           "apiVersion": "operator.openshift.io/v1alpha1",
-          "kind": "ExternalSecrets",
+          "kind": "ExternalSecretsConfig",
           "metadata": {
             "labels": {
               "app": "external-secrets-operator"
@@ -220,7 +220,7 @@ metadata:
     categories: Security
     console.openshift.io/disable-operand-delete: "true"
     containerImage: openshift.io/external-secrets-operator:latest
-    createdAt: "2025-09-15T09:47:49Z"
+    createdAt: "2025-09-26T04:57:30Z"
     features.operators.openshift.io/cnf: "false"
     features.operators.openshift.io/cni: "false"
     features.operators.openshift.io/csi: "false"
@@ -232,7 +232,7 @@ metadata:
     features.operators.openshift.io/token-auth-azure: "false"
     features.operators.openshift.io/token-auth-gcp: "false"
     olm.skipRange: <1.0.0
-    operator.openshift.io/uninstall-message: The external secrets operator for Red
+    operator.openshift.io/uninstall-message: The External Secrets Operator for Red
       Hat OpenShift will be removed from external-secrets-operator namespace. If your
       Operator configured any off-cluster resources, these will continue to run and
       require manual cleanup. All operands created by the operator will need to be
@@ -280,15 +280,13 @@ spec:
       name: externalsecrets.external-secrets.io
       version: v1
     - description: |-
-        ExternalSecrets describes configuration and information about the managed external-secrets
-        deployment. The name must be `cluster` as ExternalSecrets is a singleton,
-        allowing only one instance per cluster.
+        ExternalSecretsConfig describes configuration and information about the managed external-secrets deployment.
+        The name must be `cluster` as ExternalSecretsConfig is a singleton, allowing only one instance per cluster.
 
-        When an ExternalSecrets is created, a new deployment is created which manages the
-        external-secrets and keeps it in the desired state.
-      displayName: ExternalSecrets
-      kind: ExternalSecrets
-      name: externalsecrets.operator.openshift.io
+        When an ExternalSecretsConfig is created, the controller installs the external-secrets and keeps it in the desired state.
+      displayName: ExternalSecretsConfig
+      kind: ExternalSecretsConfig
+      name: externalsecretsconfigs.operator.openshift.io
       version: v1alpha1
     - description: |-
         ExternalSecretsManager describes configuration and information about the deployments managed by
@@ -344,7 +342,7 @@ spec:
     - kind: Webhook
       name: webhooks.generators.external-secrets.io
       version: v1alpha1
-  description: external secrets operator for Red Hat OpenShift deploys and manages
+  description: External Secrets Operator for Red Hat OpenShift deploys and manages
     `external-secrets` application in OpenShift clusters. `external-secrets` provides
     a uniform interface to fetch secrets stored in external providers like AWS
     Secrets Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM
@@ -515,7 +513,7 @@ spec:
         - apiGroups:
           - operator.openshift.io
           resources:
-          - externalsecrets
+          - externalsecretsconfigs
           verbs:
           - create
           - get
@@ -525,14 +523,14 @@ spec:
         - apiGroups:
           - operator.openshift.io
           resources:
-          - externalsecrets/finalizers
+          - externalsecretsconfigs/finalizers
           - externalsecretsmanagers/finalizers
           verbs:
           - update
         - apiGroups:
           - operator.openshift.io
           resources:
-          - externalsecrets/status
+          - externalsecretsconfigs/status
           verbs:
           - get
           - update
diff --git a/bundle/manifests/operator.openshift.io_externalsecrets.yaml b/bundle/manifests/operator.openshift.io_externalsecretsconfigs.yaml
similarity index 87%
rename from bundle/manifests/operator.openshift.io_externalsecrets.yaml
rename to bundle/manifests/operator.openshift.io_externalsecretsconfigs.yaml
index e7a6879a..482135f6 100644
--- a/bundle/manifests/operator.openshift.io_externalsecrets.yaml
+++ b/bundle/manifests/operator.openshift.io_externalsecretsconfigs.yaml
@@ -4,26 +4,44 @@ metadata:
   annotations:
     controller-gen.kubebuilder.io/version: v0.17.3
   creationTimestamp: null
-  name: externalsecrets.operator.openshift.io
+  labels:
+    app.kubernetes.io/name: externalsecretsconfig
+    app.kubernetes.io/part-of: external-secrets-operator
+  name: externalsecretsconfigs.operator.openshift.io
 spec:
   group: operator.openshift.io
   names:
-    kind: ExternalSecrets
-    listKind: ExternalSecretsList
-    plural: externalsecrets
-    singular: externalsecrets
+    categories:
+    - external-secrets-operator
+    - external-secrets
+    kind: ExternalSecretsConfig
+    listKind: ExternalSecretsConfigList
+    plural: externalsecretsconfigs
+    shortNames:
+    - esc
+    - externalsecretsconfig
+    - esconfig
+    singular: externalsecretsconfig
   scope: Cluster
   versions:
-  - name: v1alpha1
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: Ready
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].message
+      name: Message
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
     schema:
       openAPIV3Schema:
         description: |-
-          ExternalSecrets describes configuration and information about the managed external-secrets
-          deployment. The name must be `cluster` as ExternalSecrets is a singleton,
-          allowing only one instance per cluster.
+          ExternalSecretsConfig describes configuration and information about the managed external-secrets deployment.
+          The name must be `cluster` as ExternalSecretsConfig is a singleton, allowing only one instance per cluster.
 
-          When an ExternalSecrets is created, a new deployment is created which manages the
-          external-secrets and keeps it in the desired state.
+          When an ExternalSecretsConfig is created, the controller installs the external-secrets and keeps it in the desired state.
         properties:
           apiVersion:
             description: |-
@@ -44,36 +62,11 @@ spec:
             type: object
           spec:
             description: spec is the specification of the desired behavior of the
-              ExternalSecrets.
+              ExternalSecretsConfig.
             properties:
-              controllerConfig:
-                description: |-
-                  controllerConfig is for configuring the controller for setting up
-                  defaults to enable external-secrets.
-                properties:
-                  labels:
-                    additionalProperties:
-                      type: string
-                    description: labels to apply to all resources created for external-secrets
-                      deployment.
-                    type: object
-                    x-kubernetes-map-type: granular
-                  namespace:
-                    default: external-secrets
-                    description: namespace is for configuring the namespace to install
-                      the external-secret operand.
-                    type: string
-                    x-kubernetes-validations:
-                    - message: namespace is immutable once set
-                      rule: self == oldSelf
-                type: object
-                x-kubernetes-validations:
-                - message: namespace may only be configured during creation
-                  rule: '!has(oldSelf.namespace) && !has(self.namespace) || has(oldSelf.namespace)
-                    && has(self.namespace)'
-              externalSecretsConfig:
-                description: externalSecretsConfig is for configuring the external-secrets
-                  behavior.
+              appConfig:
+                description: appConfig is for specifying the configurations for the
+                  `external-secrets` operand.
                 properties:
                   affinity:
                     description: |-
@@ -1004,106 +997,9 @@ spec:
                             x-kubernetes-list-type: atomic
                         type: object
                     type: object
-                  bitwardenSecretManagerProvider:
-                    description: |-
-                      bitwardenSecretManagerProvider is for enabling the bitwarden secrets manager provider and
-                      for setting up the additional service required for connecting with the bitwarden server.
-                    properties:
-                      enabled:
-                        default: "false"
-                        description: |-
-                          enabled is for enabling the bitwarden secrets manager provider, which can be indicated
-                          by setting `true` or `false`.
-                        enum:
-                        - "true"
-                        - "false"
-                        type: string
-                      secretRef:
-                        description: |-
-                          SecretRef is the kubernetes secret containing the TLS key pair to be used for the bitwarden server.
-                          The issuer in CertManagerConfig will be utilized to generate the required certificate if the secret
-                          reference is not provided and CertManagerConfig is configured. The key names in secret for certificate
-                          must be `tls.crt`, for private key must be `tls.key` and for CA certificate key name must be `ca.crt`.
-                        properties:
-                          name:
-                            description: Name of the secret resource being referred
-                              to.
-                            type: string
-                        required:
-                        - name
-                        type: object
-                    type: object
-                  certManagerConfig:
-                    description: |-
-                      CertManagerConfig is for configuring cert-manager specifics, which will be used for generating
-                      certificates for webhook and bitwarden-sdk-server components.
-                    properties:
-                      addInjectorAnnotations:
-                        default: "false"
-                        description: |-
-                          addInjectorAnnotations is for adding the `cert-manager.io/inject-ca-from` annotation to the
-                          webhooks and CRDs to automatically setup webhook to the cert-manager CA. This requires
-                          CA Injector to be enabled in cert-manager. Use `true` or `false` to indicate the preference.
-                        enum:
-                        - "true"
-                        - "false"
-                        type: string
-                      certificateDuration:
-                        default: 8760h
-                        description: certificateDuration is the validity period of
-                          the webhook certificate.
-                        type: string
-                      certificateRenewBefore:
-                        default: 30m
-                        description: |-
-                          certificateRenewBefore is the ahead time to renew the webhook certificate
-                          before expiry.
-                        type: string
-                      enabled:
-                        default: "false"
-                        description: |-
-                          enabled is for enabling the use of cert-manager for obtaining and renewing the
-                          certificates used for webhook server, instead of built-in certificates.
-                          Use `true` or `false` to indicate the preference.
-                        enum:
-                        - "true"
-                        - "false"
-                        type: string
-                        x-kubernetes-validations:
-                        - message: enabled is immutable once set
-                          rule: self == oldSelf
-                      issuerRef:
-                        description: |-
-                          issuerRef contains details to the referenced object used for
-                          obtaining the certificates. It must exist in the external-secrets
-                          namespace if not using a cluster-scoped cert-manager issuer.
-                        properties:
-                          group:
-                            description: Group of the resource being referred to.
-                            type: string
-                          kind:
-                            description: Kind of the resource being referred to.
-                            type: string
-                          name:
-                            description: Name of the resource being referred to.
-                            type: string
-                        required:
-                        - name
-                        type: object
-                        x-kubernetes-validations:
-                        - message: issuerRef is immutable once set
-                          rule: self == oldSelf
-                    required:
-                    - enabled
-                    - issuerRef
-                    type: object
-                    x-kubernetes-validations:
-                    - message: certManagerConfig must have enabled set, to set addInjectorAnnotations
-                      rule: 'has(self.addInjectorAnnotations) && self.addInjectorAnnotations
-                        != ''false'' ? self.enabled != ''false'' : true'
                   logLevel:
                     default: 1
-                    description: logLevel supports value range as per [kubernetes
+                    description: logLevel supports value range as per [Kubernetes
                       logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
                     format: int32
                     maximum: 5
@@ -1115,13 +1011,45 @@ spec:
                     description: |-
                       nodeSelector is for defining the scheduling criteria using node labels.
                       ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                      This field can have a maximum of 50 entries.
+                    maxProperties: 50
+                    minProperties: 0
                     type: object
                     x-kubernetes-map-type: atomic
                   operatingNamespace:
                     description: |-
-                      operatingNamespace is for restricting the external-secrets operations to provided namespace.
-                      And when enabled `ClusterSecretStore` and `ClusterExternalSecret` are implicitly disabled.
+                      operatingNamespace is for restricting the external-secrets operations to the provided namespace.
+                      When configured `ClusterSecretStore` and `ClusterExternalSecret` are implicitly disabled.
+                    maxLength: 63
+                    minLength: 1
                     type: string
+                  proxy:
+                    description: proxy is for setting the proxy configurations which
+                    description: proxy is for setting the proxy configurations, which
+                      will be made available as environment variables in the operand
+                      containers managed by the operator.
+                      httpProxy:
+                        description: |-
+                          httpProxy is the URL of the proxy for HTTP requests.
+                          This field can have a maximum of 2048 characters.
+                        maxLength: 2048
+                        minLength: 0
+                        type: string
+                      httpsProxy:
+                        description: |-
+                          httpsProxy is the URL of the proxy for HTTPS requests.
+                          This field can have a maximum of 2048 characters.
+                        maxLength: 2048
+                        minLength: 0
+                        type: string
+                      noProxy:
+                        description: |-
+                          noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used.
+                          This field can have a maximum of 4096 characters.
+                        maxLength: 4096
+                        minLength: 0
+                        type: string
+                    type: object
                   resources:
                     description: |-
                       resources is for defining the resource requirements.
@@ -1188,6 +1116,7 @@ spec:
                     description: |-
                       tolerations is for setting the pod tolerations.
                       ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+                      This field can have a maximum of 50 entries.
                     items:
                       description: |-
                         The pod this Toleration is attached to tolerates any taint that matches
@@ -1224,6 +1153,8 @@ spec:
                             If the operator is Exists, the value should be empty, otherwise just a regular string.
                           type: string
                       type: object
+                    maxItems: 50
+                    minItems: 0
                     type: array
                     x-kubernetes-list-type: atomic
                   webhookConfig:
@@ -1232,16 +1163,180 @@ spec:
                     properties:
                       certificateCheckInterval:
                         default: 5m
+                        description: CertificateCheckInterval is for configuring the
+                          polling interval to check the certificate validity.
+                        type: string
+                    type: object
+                type: object
+              controllerConfig:
+                description: controllerConfig is for specifying the configurations
+                  for the controller to use while installing the `external-secrets`
+                  operand and the plugins.
+                properties:
+                  certProvider:
+                    description: certProvider is for defining the configuration for
+                      certificate providers used to manage TLS certificates for webhook
+                      and plugins.
+                    properties:
+                      certManager:
+                        description: certManager is for configuring cert-manager provider
+                          specifics.
+                        properties:
+                          certificateDuration:
+                            default: 8760h
+                            description: certificateDuration is the validity period
+                              of the webhook certificate.
+                            type: string
+                          certificateRenewBefore:
+                            default: 30m
+                            description: certificateRenewBefore is the lead time
+                              to renew the webhook certificate before expiry.
+                            type: string
+                          injectAnnotations:
+                            default: "false"
+                            description: |-
+                              injectAnnotations is for adding the `cert-manager.io/inject-ca-from` annotation to the webhooks and CRDs to automatically set up the webhook to use the cert-manager CA. This requires the CA Injector to be enabled in cert-manager.
+                              Use `true` or `false` to indicate the preference. This field is immutable once set.
+                            enum:
+                            - "true"
+                            - "false"
+                            type: string
+                            x-kubernetes-validations:
+                            - message: injectAnnotations is immutable once set
+                              rule: self == oldSelf
+                          issuerRef:
+                            description: |-
+                              issuerRef contains details of the referenced object used for obtaining certificates.
+                              When `issuerRef.Kind` is `Issuer`, it must exist in the `external-secrets` namespace.
+                              This field is immutable once set.
+                            properties:
+                              group:
+                                description: Group of the resource being referred
+                                  to.
+                                maxLength: 253
+                                minLength: 1
+                                type: string
+                              kind:
+                                description: Kind of the resource being referred to.
+                                maxLength: 253
+                                minLength: 1
+                                type: string
+                              name:
+                                description: Name of the resource being referred to.
+                                maxLength: 253
+                                minLength: 1
+                                type: string
+                            required:
+                            - name
+                            type: object
+                            x-kubernetes-validations:
+                            - message: issuerRef is immutable once set
+                              rule: self == oldSelf
+                            - message: kind must be either 'Issuer' or 'ClusterIssuer'
+                              rule: '!has(self.kind) || self.kind.lowerAscii() ==
+                                ''issuer'' || self.kind.lowerAscii() == ''clusterissuer'''
+                            - message: group must be 'cert-manager.io'
+                              rule: '!has(self.group) || self.group.lowerAscii() ==
+                                ''cert-manager.io'''
+                          mode:
+                            default: Disabled
+                            description: |-
+                              mode indicates whether to use cert-manager for certificate management instead of the built-in cert-controller.
+                              Enabled: Makes use of cert-manager for obtaining the certificates for the webhook server and other components.
+                              Disabled: Makes use of the built-in cert-controller for obtaining the certificates for the webhook server, which is the default behavior.
+                              This field is immutable once set.
+                            enum:
+                            - Enabled
+                            - Disabled
+                            type: string
+                            x-kubernetes-validations:
+                            - message: mode is immutable once set
+                              rule: self == oldSelf
+                        required:
+                        - mode
+                        type: object
+                        x-kubernetes-validations:
+                        - message: issuerRef must be provided when mode is set to
+                            Enabled.
+                          rule: self.mode != 'Enabled' || has(self.issuerRef)
+                        - message: injectAnnotations can only be set when mode is
+                            set to Enabled.
+                          rule: 'has(self.injectAnnotations) && self.injectAnnotations
+                            != ''false'' ? self.mode != ''Disabled'' : true'
+                    type: object
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      labels to apply to all resources created for the external-secrets operand deployment.
+                      This field can have a maximum of 20 entries.
+                    maxProperties: 20
+                    minProperties: 0
+                    type: object
+                    x-kubernetes-map-type: granular
+                  periodicReconcileInterval:
+                    default: 300
+                    description: |-
+                      periodicReconcileInterval specifies the time interval in seconds for periodic reconciliation by the operator.
+                      This controls how often the operator checks the resources created for the external-secrets operand to ensure they remain in the desired state.
+                      The interval can have a value between 120 and 18000 seconds (2 minutes to 5 hours). Defaults to 300 seconds (5 minutes) if not specified.
+                    format: int32
+                    maximum: 18000
+                    minimum: 120
+                    type: integer
+                type: object
+              plugins:
+                description: plugins is for configuring the optional provider plugins.
+                properties:
+                  bitwardenSecretManagerProvider:
+                    description: bitwardenSecretManagerProvider is for enabling the
+                      bitwarden secrets manager provider plugin for connecting with
+                      the bitwarden secrets manager.
+                    properties:
+                      mode:
+                        default: Disabled
                         description: |-
-                          CertificateCheckInterval is for configuring the polling interval to check the certificate
-                          validity.
+                          mode indicates the state of the bitwarden secrets manager provider plugin, which can be set to Enabled or Disabled.
+                          Enabled: Enables the Bitwarden provider plugin. The operator will ensure the plugin is deployed and its state is synchronized.
+                          Disabled: Disables reconciliation of the Bitwarden provider plugin. The plugin and its resources will remain in their current state and will not be managed by the operator.
+                        enum:
+                        - Enabled
+                        - Disabled
                         type: string
+                      secretRef:
+                        description: |-
+                          SecretRef is the Kubernetes secret containing the TLS key pair to be used for the bitwarden server.
+                          The issuer in CertManagerConfig will be utilized to generate the required certificate if the secret reference is not provided and CertManagerConfig is configured.
+                          The key names in the secret must be `tls.crt` for the certificate, `tls.key` for the private key, and `ca.crt` for the CA certificate.
+                        properties:
+                          name:
+                            description: Name of the secret resource being referred
+                              to.
+                            maxLength: 253
+                            minLength: 1
+                            type: string
+                        required:
+                        - name
+                        type: object
                     type: object
                 type: object
             type: object
+            x-kubernetes-validations:
+            - message: secretRef or certManager must be configured when bitwardenSecretManagerProvider
+                plugin is enabled
+              rule: '!has(self.plugins) || !has(self.plugins.bitwardenSecretManagerProvider)
+                || !has(self.plugins.bitwardenSecretManagerProvider.mode) || self.plugins.bitwardenSecretManagerProvider.mode
+                != ''Enabled'' || has(self.plugins.bitwardenSecretManagerProvider.secretRef)
+                || (has(self.controllerConfig) && has(self.controllerConfig.certProvider)
+                && has(self.controllerConfig.certProvider.certManager) && has(self.controllerConfig.certProvider.certManager.mode)
+                && self.controllerConfig.certProvider.certManager.mode == ''Enabled'')'
           status:
-            description: status is the most recently observed status of the ExternalSecrets.
+            description: status is the most recently observed status of the ExternalSecretsConfig.
             properties:
+              bitwardenSDKServerImage:
+                description: BitwardenSDKServerImage is the name of the image and
+                  the tag used for deploying bitwarden-sdk-server.
+                type: string
               conditions:
                 description: conditions holds information of the current state of
                   deployment.
@@ -1310,7 +1405,7 @@ spec:
             type: object
         type: object
         x-kubernetes-validations:
-        - message: ExternalSecrets is a singleton, .metadata.name must be 'cluster'
+        - message: ExternalSecretsConfig is a singleton, .metadata.name must be 'cluster'
           rule: self.metadata.name == 'cluster'
     served: true
     storage: true
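
For orientation, a minimal ExternalSecretsConfig manifest exercising the reshaped appConfig section of this CRD could look like the sketch below. The namespace, proxy endpoints, and noProxy entries are illustrative placeholders, not values taken from this patch.

apiVersion: operator.openshift.io/v1alpha1
kind: ExternalSecretsConfig
metadata:
  name: cluster                       # singleton; the name must be 'cluster'
spec:
  appConfig:
    logLevel: 1
    operatingNamespace: team-secrets  # placeholder; setting this implicitly disables ClusterSecretStore/ClusterExternalSecret
    proxy:
      httpProxy: http://proxy.example.com:3128    # placeholder proxy URL
      httpsProxy: http://proxy.example.com:3128   # placeholder proxy URL
      noProxy: .cluster.local,10.0.0.0/16         # placeholder exclusions
    webhookConfig:
      certificateCheckInterval: 5m    # default shown explicitly
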
diff --git a/bundle/manifests/operator.openshift.io_externalsecretsmanagers.yaml b/bundle/manifests/operator.openshift.io_externalsecretsmanagers.yaml
index 1f959f30..61a88270 100644
--- a/bundle/manifests/operator.openshift.io_externalsecretsmanagers.yaml
+++ b/bundle/manifests/operator.openshift.io_externalsecretsmanagers.yaml
@@ -4,27 +4,39 @@ metadata:
   annotations:
     controller-gen.kubebuilder.io/version: v0.17.3
   creationTimestamp: null
+  labels:
+    app.kubernetes.io/name: externalsecretsmanager
+    app.kubernetes.io/part-of: external-secrets-operator
   name: externalsecretsmanagers.operator.openshift.io
 spec:
   group: operator.openshift.io
   names:
+    categories:
+    - external-secrets-operator
+    - external-secrets
     kind: ExternalSecretsManager
     listKind: ExternalSecretsManagerList
     plural: externalsecretsmanagers
+    shortNames:
+    - esm
+    - externalsecretsmanager
+    - esmanager
     singular: externalsecretsmanager
   scope: Cluster
   versions:
-  - name: v1alpha1
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
     schema:
       openAPIV3Schema:
         description: |-
-          ExternalSecretsManager describes configuration and information about the deployments managed by
-          the external-secrets-operator. The name must be `cluster` as this is a singleton object allowing
-          only one instance of ExternalSecretsManager per cluster.
+          ExternalSecretsManager describes configuration and information about the deployments managed by the external-secrets-operator.
+          The name must be `cluster` as this is a singleton object allowing only one instance of ExternalSecretsManager per cluster.
 
-          It is mainly for configuring the global options and enabling optional features, which
-          serves as a common/centralized config for managing multiple controllers of the operator. The object
-          is automatically created during the operator installation.
+          It is mainly for configuring the global options and enabling optional features, which serves as a common/centralized config for managing multiple controllers of the operator.
+          The object is automatically created during the operator installation.
         properties:
           apiVersion:
             description: |-
@@ -46,29 +58,9 @@ spec:
           spec:
             description: spec is the specification of the desired behavior
             properties:
-              features:
-                description: features is for enabling the optional operator features.
-                items:
-                  description: |-
-                    Feature is for enabling the optional features.
-                    Feature is for enabling the optional features.
-                  properties:
-                    enabled:
-                      description: enabled determines if feature should be turned
-                        on.
-                      type: boolean
-                    name:
-                      description: name of the optional feature.
-                      type: string
-                  required:
-                  - enabled
-                  - name
-                  type: object
-                type: array
               globalConfig:
-                description: |-
-                  globalConfig is for configuring the behavior of deployments that are managed
-                  by external secrets-operator.
+                description: globalConfig is for configuring the behavior of deployments
+                  that are managed by the external-secrets-operator.
                 properties:
                   affinity:
                     description: |-
@@ -1002,13 +994,16 @@ spec:
                   labels:
                     additionalProperties:
                       type: string
-                    description: labels to apply to all resources created for external-secrets
-                      deployment.
+                    description: |-
+                      labels to apply to all resources created by the operator.
+                      This field can have a maximum of 20 entries.
+                    maxProperties: 20
+                    minProperties: 0
                     type: object
                     x-kubernetes-map-type: granular
                   logLevel:
                     default: 1
-                    description: logLevel supports value range as per [kubernetes
+                    description: logLevel supports value range as per [Kubernetes
                       logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
                     format: int32
                     maximum: 5
@@ -1020,8 +1015,38 @@ spec:
                     description: |-
                       nodeSelector is for defining the scheduling criteria using node labels.
                       ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                      This field can have a maximum of 50 entries.
+                    maxProperties: 50
+                    minProperties: 0
                     type: object
                     x-kubernetes-map-type: atomic
+                  proxy:
+                    description: proxy is for setting the proxy configuration, which
+                      will be made available as environment variables in the operand
+                      containers managed by the operator.
+                    properties:
+                      httpProxy:
+                        description: |-
+                          httpProxy is the URL of the proxy for HTTP requests.
+                          This field can have a maximum of 2048 characters.
+                        maxLength: 2048
+                        minLength: 0
+                        type: string
+                      httpsProxy:
+                        description: |-
+                          httpsProxy is the URL of the proxy for HTTPS requests.
+                          This field can have a maximum of 2048 characters.
+                        maxLength: 2048
+                        minLength: 0
+                        type: string
+                      noProxy:
+                        description: |-
+                          noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used.
+                          This field can have a maximum of 4096 characters.
+                        maxLength: 4096
+                        minLength: 0
+                        type: string
+                    type: object
                   resources:
                     description: |-
                       resources is for defining the resource requirements.
@@ -1088,6 +1113,7 @@ spec:
                     description: |-
                       tolerations is for setting the pod tolerations.
                       ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+                      This field can have a maximum of 50 entries.
                     items:
                       description: |-
                         The pod this Toleration is attached to tolerates any taint that matches
@@ -1124,14 +1150,45 @@ spec:
                             If the operator is Exists, the value should be empty, otherwise just a regular string.
                           type: string
                       type: object
+                    maxItems: 50
+                    minItems: 0
                     type: array
                     x-kubernetes-list-type: atomic
                 type: object
+              optionalFeatures:
+                description: optionalFeatures is for enabling the optional operator
+                  features.
+                items:
+                  description: Feature is for enabling the optional features.
+                  properties:
+                    mode:
+                      description: |-
+                        mode indicates the feature state.
+                        Use Enabled or Disabled to indicate the preference.
+                        Enabled: Enables the optional feature and creates resources if required.
+                        Disabled: Disables the optional feature, but will not remove any resources created.
+                      enum:
+                      - Enabled
+                      - Disabled
+                      type: string
+                    name:
+                      description: name of the optional feature. There are no optional
+                        features currently supported.
+                      enum:
+                      - ""
+                      type: string
+                  required:
+                  - mode
+                  - name
+                  type: object
+                type: array
+                x-kubernetes-list-map-keys:
+                - name
+                x-kubernetes-list-type: map
             type: object
           status:
-            description: |-
-              status is the most recently observed status of controllers used by
-              External Secrets Operator.
+            description: status is the most recently observed status of controllers
+              used by the External Secrets Operator.
             properties:
               controllerStatuses:
                 description: controllerStatuses holds the observed conditions of the
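
For the ExternalSecretsManager CRD changes above, a sketch of a manifest using the new globalConfig constraints is shown below; the label, nodeSelector, and toleration values are hypothetical. optionalFeatures is omitted because its name enum currently accepts no feature names.

apiVersion: operator.openshift.io/v1alpha1
kind: ExternalSecretsManager
metadata:
  name: cluster                 # singleton; created automatically during operator installation
spec:
  globalConfig:
    logLevel: 2
    labels:                     # applied to all resources created by the operator (max 20 entries)
      environment: staging      # placeholder label
    nodeSelector:               # max 50 entries
      kubernetes.io/os: linux
    tolerations:                # max 50 entries
    - key: dedicated            # placeholder taint key
      operator: Equal
      value: secrets
      effect: NoSchedule
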
diff --git a/config/crd/bases/operator.openshift.io_externalsecrets.yaml b/config/crd/bases/operator.openshift.io_externalsecretsconfigs.yaml
similarity index 87%
rename from config/crd/bases/operator.openshift.io_externalsecrets.yaml
rename to config/crd/bases/operator.openshift.io_externalsecretsconfigs.yaml
index 96659e8b..9ae490e8 100644
--- a/config/crd/bases/operator.openshift.io_externalsecrets.yaml
+++ b/config/crd/bases/operator.openshift.io_externalsecretsconfigs.yaml
@@ -4,26 +4,44 @@ kind: CustomResourceDefinition
 metadata:
   annotations:
     controller-gen.kubebuilder.io/version: v0.17.3
-  name: externalsecrets.operator.openshift.io
+  labels:
+    app.kubernetes.io/name: externalsecretsconfig
+    app.kubernetes.io/part-of: external-secrets-operator
+  name: externalsecretsconfigs.operator.openshift.io
 spec:
   group: operator.openshift.io
   names:
-    kind: ExternalSecrets
-    listKind: ExternalSecretsList
-    plural: externalsecrets
-    singular: externalsecrets
+    categories:
+    - external-secrets-operator
+    - external-secrets
+    kind: ExternalSecretsConfig
+    listKind: ExternalSecretsConfigList
+    plural: externalsecretsconfigs
+    shortNames:
+    - esc
+    - externalsecretsconfig
+    - esconfig
+    singular: externalsecretsconfig
   scope: Cluster
   versions:
-  - name: v1alpha1
+  - additionalPrinterColumns:
+    - jsonPath: .status.conditions[?(@.type=='Ready')].status
+      name: Ready
+      type: string
+    - jsonPath: .status.conditions[?(@.type=='Ready')].message
+      name: Message
+      type: string
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
     schema:
       openAPIV3Schema:
         description: |-
-          ExternalSecrets describes configuration and information about the managed external-secrets
-          deployment. The name must be `cluster` as ExternalSecrets is a singleton,
-          allowing only one instance per cluster.
+          ExternalSecretsConfig describes configuration and information about the managed external-secrets deployment.
+          The name must be `cluster` as ExternalSecretsConfig is a singleton, allowing only one instance per cluster.
 
-          When an ExternalSecrets is created, a new deployment is created which manages the
-          external-secrets and keeps it in the desired state.
+          When an ExternalSecretsConfig is created, the controller installs the external-secrets and keeps it in the desired state.
         properties:
           apiVersion:
             description: |-
@@ -44,36 +62,11 @@ spec:
             type: object
           spec:
             description: spec is the specification of the desired behavior of the
-              ExternalSecrets.
+              ExternalSecretsConfig.
             properties:
-              controllerConfig:
-                description: |-
-                  controllerConfig is for configuring the controller for setting up
-                  defaults to enable external-secrets.
-                properties:
-                  labels:
-                    additionalProperties:
-                      type: string
-                    description: labels to apply to all resources created for external-secrets
-                      deployment.
-                    type: object
-                    x-kubernetes-map-type: granular
-                  namespace:
-                    default: external-secrets
-                    description: namespace is for configuring the namespace to install
-                      the external-secret operand.
-                    type: string
-                    x-kubernetes-validations:
-                    - message: namespace is immutable once set
-                      rule: self == oldSelf
-                type: object
-                x-kubernetes-validations:
-                - message: namespace may only be configured during creation
-                  rule: '!has(oldSelf.namespace) && !has(self.namespace) || has(oldSelf.namespace)
-                    && has(self.namespace)'
-              externalSecretsConfig:
-                description: externalSecretsConfig is for configuring the external-secrets
-                  behavior.
+              appConfig:
+                description: appConfig is for specifying the configurations for the
+                  `external-secrets` operand.
                 properties:
                   affinity:
                     description: |-
@@ -1004,106 +997,9 @@ spec:
                             x-kubernetes-list-type: atomic
                         type: object
                     type: object
-                  bitwardenSecretManagerProvider:
-                    description: |-
-                      bitwardenSecretManagerProvider is for enabling the bitwarden secrets manager provider and
-                      for setting up the additional service required for connecting with the bitwarden server.
-                    properties:
-                      enabled:
-                        default: "false"
-                        description: |-
-                          enabled is for enabling the bitwarden secrets manager provider, which can be indicated
-                          by setting `true` or `false`.
-                        enum:
-                        - "true"
-                        - "false"
-                        type: string
-                      secretRef:
-                        description: |-
-                          SecretRef is the kubernetes secret containing the TLS key pair to be used for the bitwarden server.
-                          The issuer in CertManagerConfig will be utilized to generate the required certificate if the secret
-                          reference is not provided and CertManagerConfig is configured. The key names in secret for certificate
-                          must be `tls.crt`, for private key must be `tls.key` and for CA certificate key name must be `ca.crt`.
-                        properties:
-                          name:
-                            description: Name of the secret resource being referred
-                              to.
-                            type: string
-                        required:
-                        - name
-                        type: object
-                    type: object
-                  certManagerConfig:
-                    description: |-
-                      CertManagerConfig is for configuring cert-manager specifics, which will be used for generating
-                      certificates for webhook and bitwarden-sdk-server components.
-                    properties:
-                      addInjectorAnnotations:
-                        default: "false"
-                        description: |-
-                          addInjectorAnnotations is for adding the `cert-manager.io/inject-ca-from` annotation to the
-                          webhooks and CRDs to automatically setup webhook to the cert-manager CA. This requires
-                          CA Injector to be enabled in cert-manager. Use `true` or `false` to indicate the preference.
-                        enum:
-                        - "true"
-                        - "false"
-                        type: string
-                      certificateDuration:
-                        default: 8760h
-                        description: certificateDuration is the validity period of
-                          the webhook certificate.
-                        type: string
-                      certificateRenewBefore:
-                        default: 30m
-                        description: |-
-                          certificateRenewBefore is the ahead time to renew the webhook certificate
-                          before expiry.
-                        type: string
-                      enabled:
-                        default: "false"
-                        description: |-
-                          enabled is for enabling the use of cert-manager for obtaining and renewing the
-                          certificates used for webhook server, instead of built-in certificates.
-                          Use `true` or `false` to indicate the preference.
-                        enum:
-                        - "true"
-                        - "false"
-                        type: string
-                        x-kubernetes-validations:
-                        - message: enabled is immutable once set
-                          rule: self == oldSelf
-                      issuerRef:
-                        description: |-
-                          issuerRef contains details to the referenced object used for
-                          obtaining the certificates. It must exist in the external-secrets
-                          namespace if not using a cluster-scoped cert-manager issuer.
-                        properties:
-                          group:
-                            description: Group of the resource being referred to.
-                            type: string
-                          kind:
-                            description: Kind of the resource being referred to.
-                            type: string
-                          name:
-                            description: Name of the resource being referred to.
-                            type: string
-                        required:
-                        - name
-                        type: object
-                        x-kubernetes-validations:
-                        - message: issuerRef is immutable once set
-                          rule: self == oldSelf
-                    required:
-                    - enabled
-                    - issuerRef
-                    type: object
-                    x-kubernetes-validations:
-                    - message: certManagerConfig must have enabled set, to set addInjectorAnnotations
-                      rule: 'has(self.addInjectorAnnotations) && self.addInjectorAnnotations
-                        != ''false'' ? self.enabled != ''false'' : true'
                   logLevel:
                     default: 1
-                    description: logLevel supports value range as per [kubernetes
+                    description: logLevel supports value range as per [Kubernetes
                       logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
                     format: int32
                     maximum: 5
@@ -1115,13 +1011,45 @@ spec:
                     description: |-
                       nodeSelector is for defining the scheduling criteria using node labels.
                       ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                      This field can have a maximum of 50 entries.
+                    maxProperties: 50
+                    minProperties: 0
                     type: object
                     x-kubernetes-map-type: atomic
                   operatingNamespace:
                     description: |-
-                      operatingNamespace is for restricting the external-secrets operations to provided namespace.
-                      And when enabled `ClusterSecretStore` and `ClusterExternalSecret` are implicitly disabled.
+                      operatingNamespace is for restricting the external-secrets operations to the provided namespace.
+                      When configured, `ClusterSecretStore` and `ClusterExternalSecret` are implicitly disabled.
+                    maxLength: 63
+                    minLength: 1
                     type: string
+                  proxy:
+                    description: proxy is for setting the proxy configuration, which
+                      will be made available as environment variables in the operand
+                      containers managed by the operator.
+                    properties:
+                      httpProxy:
+                        description: |-
+                          httpProxy is the URL of the proxy for HTTP requests.
+                          This field can have a maximum of 2048 characters.
+                        maxLength: 2048
+                        minLength: 0
+                        type: string
+                      httpsProxy:
+                        description: |-
+                          httpsProxy is the URL of the proxy for HTTPS requests.
+                          This field can have a maximum of 2048 characters.
+                        maxLength: 2048
+                        minLength: 0
+                        type: string
+                      noProxy:
+                        description: |-
+                          noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used.
+                          This field can have a maximum of 4096 characters.
+                        maxLength: 4096
+                        minLength: 0
+                        type: string
+                    type: object
                   resources:
                     description: |-
                       resources is for defining the resource requirements.
@@ -1188,6 +1116,7 @@ spec:
                     description: |-
                       tolerations is for setting the pod tolerations.
                       ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+                      This field can have a maximum of 50 entries.
                     items:
                       description: |-
                         The pod this Toleration is attached to tolerates any taint that matches
@@ -1224,6 +1153,8 @@ spec:
                             If the operator is Exists, the value should be empty, otherwise just a regular string.
                           type: string
                       type: object
+                    maxItems: 50
+                    minItems: 0
                     type: array
                     x-kubernetes-list-type: atomic
                   webhookConfig:
@@ -1232,16 +1163,180 @@ spec:
                     properties:
                       certificateCheckInterval:
                         default: 5m
+                        description: CertificateCheckInterval is for configuring the
+                          polling interval to check the certificate validity.
+                        type: string
+                    type: object
+                type: object
+              controllerConfig:
+                description: controllerConfig is for specifying the configurations
+                  for the controller to use while installing the `external-secrets`
+                  operand and the plugins.
+                properties:
+                  certProvider:
+                    description: certProvider is for defining the configuration for
+                      certificate providers used to manage TLS certificates for webhook
+                      and plugins.
+                    properties:
+                      certManager:
+                        description: certManager is for configuring cert-manager provider
+                          specifics.
+                        properties:
+                          certificateDuration:
+                            default: 8760h
+                            description: certificateDuration is the validity period
+                              of the webhook certificate.
+                            type: string
+                          certificateRenewBefore:
+                            default: 30m
+                            description: certificateRenewBefore is the lead time
+                              to renew the webhook certificate before expiry.
+                            type: string
+                          injectAnnotations:
+                            default: "false"
+                            description: |-
+                              injectAnnotations is for adding the `cert-manager.io/inject-ca-from` annotation to the webhooks and CRDs to automatically set up the webhook to use the cert-manager CA. This requires the CA Injector to be enabled in cert-manager.
+                              Use `true` or `false` to indicate the preference. This field is immutable once set.
+                            enum:
+                            - "true"
+                            - "false"
+                            type: string
+                            x-kubernetes-validations:
+                            - message: injectAnnotations is immutable once set
+                              rule: self == oldSelf
+                          issuerRef:
+                            description: |-
+                              issuerRef contains details of the referenced object used for obtaining certificates.
+                              When `issuerRef.Kind` is `Issuer`, it must exist in the `external-secrets` namespace.
+                              This field is immutable once set.
+                            properties:
+                              group:
+                                description: Group of the resource being referred
+                                  to.
+                                maxLength: 253
+                                minLength: 1
+                                type: string
+                              kind:
+                                description: Kind of the resource being referred to.
+                                maxLength: 253
+                                minLength: 1
+                                type: string
+                              name:
+                                description: Name of the resource being referred to.
+                                maxLength: 253
+                                minLength: 1
+                                type: string
+                            required:
+                            - name
+                            type: object
+                            x-kubernetes-validations:
+                            - message: issuerRef is immutable once set
+                              rule: self == oldSelf
+                            - message: kind must be either 'Issuer' or 'ClusterIssuer'
+                              rule: '!has(self.kind) || self.kind.lowerAscii() ==
+                                ''issuer'' || self.kind.lowerAscii() == ''clusterissuer'''
+                            - message: group must be 'cert-manager.io'
+                              rule: '!has(self.group) || self.group.lowerAscii() ==
+                                ''cert-manager.io'''
+                          mode:
+                            default: Disabled
+                            description: |-
+                              mode indicates whether to use cert-manager for certificate management instead of the built-in cert-controller.
+                              Enabled: Makes use of cert-manager for obtaining the certificates for the webhook server and other components.
+                              Disabled: Makes use of the built-in cert-controller for obtaining the certificates for the webhook server, which is the default behavior.
+                              This field is immutable once set.
+                            enum:
+                            - Enabled
+                            - Disabled
+                            type: string
+                            x-kubernetes-validations:
+                            - message: mode is immutable once set
+                              rule: self == oldSelf
+                        required:
+                        - mode
+                        type: object
+                        x-kubernetes-validations:
+                        - message: issuerRef must be provided when mode is set to
+                            Enabled.
+                          rule: self.mode != 'Enabled' || has(self.issuerRef)
+                        - message: injectAnnotations can only be set when mode is
+                            set to Enabled.
+                          rule: 'has(self.injectAnnotations) && self.injectAnnotations
+                            != ''false'' ? self.mode != ''Disabled'' : true'
+                    type: object
+                  labels:
+                    additionalProperties:
+                      type: string
+                    description: |-
+                      labels to apply to all resources created for the external-secrets operand deployment.
+                      This field can have a maximum of 20 entries.
+                    maxProperties: 20
+                    minProperties: 0
+                    type: object
+                    x-kubernetes-map-type: granular
+                  periodicReconcileInterval:
+                    default: 300
+                    description: |-
+                      periodicReconcileInterval specifies the time interval in seconds for periodic reconciliation by the operator.
+                      This controls how often the operator checks the resources created for the external-secrets operand to ensure they remain in the desired state.
+                      The interval can have a value between 120 and 18000 seconds (2 minutes to 5 hours). Defaults to 300 seconds (5 minutes) if not specified.
+                    format: int32
+                    maximum: 18000
+                    minimum: 120
+                    type: integer
+                type: object
+              plugins:
+                description: plugins is for configuring the optional provider plugins.
+                properties:
+                  bitwardenSecretManagerProvider:
+                    description: bitwardenSecretManagerProvider is for enabling the
+                      bitwarden secrets manager provider plugin for connecting with
+                      the bitwarden secrets manager.
+                    properties:
+                      mode:
+                        default: Disabled
                         description: |-
-                          CertificateCheckInterval is for configuring the polling interval to check the certificate
-                          validity.
+                          mode indicates the state of the bitwarden secrets manager provider plugin, which can be set to Enabled or Disabled.
+                          Enabled: Enables the Bitwarden provider plugin. The operator will ensure the plugin is deployed and its state is synchronized.
+                          Disabled: Disables reconciliation of the Bitwarden provider plugin. The plugin and its resources will remain in their current state and will not be managed by the operator.
+                        enum:
+                        - Enabled
+                        - Disabled
                         type: string
+                      secretRef:
+                        description: |-
+                          SecretRef is the Kubernetes secret containing the TLS key pair to be used for the bitwarden server.
+                          The issuer in CertManagerConfig will be utilized to generate the required certificate if the secret reference is not provided and CertManagerConfig is configured.
+                          The key names in the secret must be `tls.crt` for the certificate, `tls.key` for the private key, and `ca.crt` for the CA certificate.
+                        properties:
+                          name:
+                            description: Name of the secret resource being referred
+                              to.
+                            maxLength: 253
+                            minLength: 1
+                            type: string
+                        required:
+                        - name
+                        type: object
                     type: object
                 type: object
             type: object
+            x-kubernetes-validations:
+            - message: secretRef or certManager must be configured when bitwardenSecretManagerProvider
+                plugin is enabled
+              rule: '!has(self.plugins) || !has(self.plugins.bitwardenSecretManagerProvider)
+                || !has(self.plugins.bitwardenSecretManagerProvider.mode) || self.plugins.bitwardenSecretManagerProvider.mode
+                != ''Enabled'' || has(self.plugins.bitwardenSecretManagerProvider.secretRef)
+                || (has(self.controllerConfig) && has(self.controllerConfig.certProvider)
+                && has(self.controllerConfig.certProvider.certManager) && has(self.controllerConfig.certProvider.certManager.mode)
+                && self.controllerConfig.certProvider.certManager.mode == ''Enabled'')'
           status:
-            description: status is the most recently observed status of the ExternalSecrets.
+            description: status is the most recently observed status of the ExternalSecretsConfig.
             properties:
+              bitwardenSDKServerImage:
+                description: BitwardenSDKServerImage is the name of the image and
+                  the tag used for deploying bitwarden-sdk-server.
+                type: string
               conditions:
                 description: conditions holds information of the current state of
                   deployment.
@@ -1310,7 +1405,7 @@ spec:
             type: object
         type: object
         x-kubernetes-validations:
-        - message: ExternalSecrets is a singleton, .metadata.name must be 'cluster'
+        - message: ExternalSecretsConfig is a singleton, .metadata.name must be 'cluster'
           rule: self.metadata.name == 'cluster'
     served: true
     storage: true
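
To illustrate how the new controllerConfig and plugins sections interact with the x-kubernetes-validations rules above, a sketch is shown below; it assumes cert-manager is installed and uses a placeholder ClusterIssuer name. With certManager mode set to Enabled, the Bitwarden plugin can be enabled without a secretRef and still satisfy the top-level CEL rule.

apiVersion: operator.openshift.io/v1alpha1
kind: ExternalSecretsConfig
metadata:
  name: cluster
spec:
  controllerConfig:
    certProvider:
      certManager:
        mode: Enabled                # immutable once set; requires issuerRef
        injectAnnotations: "true"    # only allowed while mode is Enabled
        issuerRef:
          name: cluster-ca           # placeholder issuer name
          kind: ClusterIssuer        # must be Issuer or ClusterIssuer
          group: cert-manager.io
    periodicReconcileInterval: 600   # seconds; allowed range 120-18000
  plugins:
    bitwardenSecretManagerProvider:
      mode: Enabled                  # certificate comes from the cert-manager issuer, so secretRef is omitted
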
diff --git a/config/crd/bases/operator.openshift.io_externalsecretsmanagers.yaml b/config/crd/bases/operator.openshift.io_externalsecretsmanagers.yaml
index 3de54129..938dbe53 100644
--- a/config/crd/bases/operator.openshift.io_externalsecretsmanagers.yaml
+++ b/config/crd/bases/operator.openshift.io_externalsecretsmanagers.yaml
@@ -4,27 +4,39 @@ kind: CustomResourceDefinition
 metadata:
   annotations:
     controller-gen.kubebuilder.io/version: v0.17.3
+  labels:
+    app.kubernetes.io/name: externalsecretsmanager
+    app.kubernetes.io/part-of: external-secrets-operator
   name: externalsecretsmanagers.operator.openshift.io
 spec:
   group: operator.openshift.io
   names:
+    categories:
+    - external-secrets-operator
+    - external-secrets
     kind: ExternalSecretsManager
     listKind: ExternalSecretsManagerList
     plural: externalsecretsmanagers
+    shortNames:
+    - esm
+    - externalsecretsmanager
+    - esmanager
     singular: externalsecretsmanager
   scope: Cluster
   versions:
-  - name: v1alpha1
+  - additionalPrinterColumns:
+    - jsonPath: .metadata.creationTimestamp
+      name: AGE
+      type: date
+    name: v1alpha1
     schema:
       openAPIV3Schema:
         description: |-
-          ExternalSecretsManager describes configuration and information about the deployments managed by
-          the external-secrets-operator. The name must be `cluster` as this is a singleton object allowing
-          only one instance of ExternalSecretsManager per cluster.
+          ExternalSecretsManager describes configuration and information about the deployments managed by the external-secrets-operator.
+          The name must be `cluster` as this is a singleton object allowing only one instance of ExternalSecretsManager per cluster.
 
-          It is mainly for configuring the global options and enabling optional features, which
-          serves as a common/centralized config for managing multiple controllers of the operator. The object
-          is automatically created during the operator installation.
+          It is mainly for configuring the global options and enabling optional features, which serves as a common/centralized config for managing multiple controllers of the operator.
+          The object is automatically created during the operator installation.
         properties:
           apiVersion:
             description: |-
@@ -46,29 +58,9 @@ spec:
           spec:
             description: spec is the specification of the desired behavior
             properties:
-              features:
-                description: features is for enabling the optional operator features.
-                items:
-                  description: |-
-                    Feature is for enabling the optional features.
-                    Feature is for enabling the optional features.
-                  properties:
-                    enabled:
-                      description: enabled determines if feature should be turned
-                        on.
-                      type: boolean
-                    name:
-                      description: name of the optional feature.
-                      type: string
-                  required:
-                  - enabled
-                  - name
-                  type: object
-                type: array
               globalConfig:
-                description: |-
-                  globalConfig is for configuring the behavior of deployments that are managed
-                  by external secrets-operator.
+                description: globalConfig is for configuring the behavior of deployments
+                  that are managed by the external-secrets-operator.
                 properties:
                   affinity:
                     description: |-
@@ -1002,13 +994,16 @@ spec:
                   labels:
                     additionalProperties:
                       type: string
-                    description: labels to apply to all resources created for external-secrets
-                      deployment.
+                    description: |-
+                      labels to apply to all resources created by the operator.
+                      This field can have a maximum of 20 entries.
+                    maxProperties: 20
+                    minProperties: 0
                     type: object
                     x-kubernetes-map-type: granular
                   logLevel:
                     default: 1
-                    description: logLevel supports value range as per [kubernetes
+                    description: logLevel supports value range as per [Kubernetes
                       logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
                     format: int32
                     maximum: 5
@@ -1020,8 +1015,38 @@ spec:
                     description: |-
                       nodeSelector is for defining the scheduling criteria using node labels.
                       ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+                      This field can have a maximum of 50 entries.
+                    maxProperties: 50
+                    minProperties: 0
                     type: object
                     x-kubernetes-map-type: atomic
+                  proxy:
+                    description: proxy is for setting the proxy configuration, which
+                      will be made available as environment variables in the operand
+                      containers managed by the operator.
+                    properties:
+                      httpProxy:
+                        description: |-
+                          httpProxy is the URL of the proxy for HTTP requests.
+                          This field can have a maximum of 2048 characters.
+                        maxLength: 2048
+                        minLength: 0
+                        type: string
+                      httpsProxy:
+                        description: |-
+                          httpsProxy is the URL of the proxy for HTTPS requests.
+                          This field can have a maximum of 2048 characters.
+                        maxLength: 2048
+                        minLength: 0
+                        type: string
+                      noProxy:
+                        description: |-
+                          noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used.
+                          This field can have a maximum of 4096 characters.
+                        maxLength: 4096
+                        minLength: 0
+                        type: string
+                    type: object
                   resources:
                     description: |-
                       resources is for defining the resource requirements.
@@ -1088,6 +1113,7 @@ spec:
                     description: |-
                       tolerations is for setting the pod tolerations.
                       ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+                      This field can have a maximum of 50 entries.
                     items:
                       description: |-
                         The pod this Toleration is attached to tolerates any taint that matches
@@ -1124,14 +1150,45 @@ spec:
                             If the operator is Exists, the value should be empty, otherwise just a regular string.
                           type: string
                       type: object
+                    maxItems: 50
+                    minItems: 0
                     type: array
                     x-kubernetes-list-type: atomic
                 type: object
+              optionalFeatures:
+                description: optionalFeatures is for enabling the optional operator
+                  features.
+                items:
+                  description: Feature is for enabling the optional features.
+                  properties:
+                    mode:
+                      description: |-
+                        mode indicates the feature state.
+                        Use Enabled or Disabled to indicate the preference.
+                        Enabled: Enables the optional feature and creates resources if required.
+                        Disabled: Disables the optional feature, but will not remove any resources created.
+                      enum:
+                      - Enabled
+                      - Disabled
+                      type: string
+                    name:
+                      description: name of the optional feature. There are no optional
+                        features currently supported.
+                      enum:
+                      - ""
+                      type: string
+                  required:
+                  - mode
+                  - name
+                  type: object
+                type: array
+                x-kubernetes-list-map-keys:
+                - name
+                x-kubernetes-list-type: map
             type: object
           status:
-            description: |-
-              status is the most recently observed status of controllers used by
-              External Secrets Operator.
+            description: status is the most recently observed status of controllers
+              used by External Secrets Operator.
             properties:
               controllerStatuses:
                 description: controllerStatuses holds the observed conditions of the
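
For illustration, the fields added to this CRD above could be exercised with a manifest along the following lines; a minimal sketch assuming this hunk belongs to the ExternalSecretsManager CRD (the globalConfig/optionalFeatures layout matches the API reference further below), with placeholder proxy values:

```yaml
# Illustrative only, not part of this patch: an ExternalSecretsManager CR using
# the new globalConfig.proxy settings and the optionalFeatures list.
apiVersion: operator.openshift.io/v1alpha1
kind: ExternalSecretsManager
metadata:
  name: cluster                                  # singleton, the name must be `cluster`
spec:
  globalConfig:
    proxy:
      httpProxy: http://proxy.example.com:3128   # at most 2048 characters
      httpsProxy: http://proxy.example.com:3128  # at most 2048 characters
      noProxy: .cluster.local,.svc,10.0.0.0/16   # comma-separated, at most 4096 characters
  # optionalFeatures entries require name and mode (Enabled/Disabled); no feature
  # names are currently supported, so the list is left empty here.
  optionalFeatures: []
```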
diff --git a/config/crd/kustomization.yaml b/config/crd/kustomization.yaml
index 790cdbd9..d54ce5f2 100644
--- a/config/crd/kustomization.yaml
+++ b/config/crd/kustomization.yaml
@@ -2,7 +2,7 @@
 # since it depends on service name and namespace that are out of this kustomize package.
 # It should be run by config/default
 resources:
-- bases/operator.openshift.io_externalsecrets.yaml
+- bases/operator.openshift.io_externalsecretsconfigs.yaml
 - bases/operator.openshift.io_externalsecretsmanagers.yaml
 - bases/customresourcedefinition_acraccesstokens.generators.external-secrets.io.yml
 - bases/customresourcedefinition_clusterexternalsecrets.external-secrets.io.yml
diff --git a/config/manifests/bases/external-secrets-operator.clusterserviceversion.yaml b/config/manifests/bases/external-secrets-operator.clusterserviceversion.yaml
index 3eae0e46..85cd9c25 100644
--- a/config/manifests/bases/external-secrets-operator.clusterserviceversion.yaml
+++ b/config/manifests/bases/external-secrets-operator.clusterserviceversion.yaml
@@ -19,7 +19,7 @@ metadata:
     features.operators.openshift.io/token-auth-azure: "false"
     features.operators.openshift.io/token-auth-gcp: "false"
     olm.skipRange: <1.0.0
-    operator.openshift.io/uninstall-message: The external secrets operator for Red
+    operator.openshift.io/uninstall-message: The External Secrets Operator for Red
       Hat OpenShift will be removed from external-secrets-operator namespace. If your
       Operator configured any off-cluster resources, these will continue to run and
       require manual cleanup. All operands created by the operator will need to be
@@ -120,19 +120,17 @@ spec:
       name: externalsecretsmanagers.operator.openshift.io
       version: v1alpha1
     - description: |-
-        ExternalSecrets describes configuration and information about the managed external-secrets
-        deployment. The name must be `cluster` as ExternalSecrets is a singleton,
-        allowing only one instance per cluster.
+        ExternalSecretsConfig describes configuration and information about the managed external-secrets deployment.
+        The name must be `cluster` as ExternalSecretsConfig is a singleton, allowing only one instance per cluster.
 
-        When an ExternalSecrets is created, a new deployment is created which manages the
-        external-secrets and keeps it in the desired state.
-      displayName: ExternalSecrets
-      kind: ExternalSecrets
-      name: externalsecrets.operator.openshift.io
+        When an ExternalSecretsConfig is created, the controller installs the external-secrets and keeps it in the desired state.
+      displayName: ExternalSecretsConfig
+      kind: ExternalSecretsConfig
+      name: externalsecretsconfigs.operator.openshift.io
       version: v1alpha1
-  description: external secrets operator for Red Hat OpenShift deploys and manages
+  description: External Secrets Operator for Red Hat OpenShift deploys and manages the
     `external-secrets` application in OpenShift clusters. `external-secrets` provides
-    an uniformed interface to fetch secrets stored in external providers like  AWS
+    a uniform interface to fetch secrets stored in external providers like AWS
     Secrets Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM
     Cloud Secrets Manager to name a few, stores them as secrets in OpenShift. It provides
     APIs to define authentication and the details of the secret to fetch.
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
index c790aa37..24c8cf0f 100644
--- a/config/rbac/role.yaml
+++ b/config/rbac/role.yaml
@@ -161,7 +161,7 @@ rules:
 - apiGroups:
   - operator.openshift.io
   resources:
-  - externalsecrets
+  - externalsecretsconfigs
   verbs:
   - create
   - get
@@ -171,14 +171,14 @@ rules:
 - apiGroups:
   - operator.openshift.io
   resources:
-  - externalsecrets/finalizers
+  - externalsecretsconfigs/finalizers
   - externalsecretsmanagers/finalizers
   verbs:
   - update
 - apiGroups:
   - operator.openshift.io
   resources:
-  - externalsecrets/status
+  - externalsecretsconfigs/status
   verbs:
   - get
   - update
diff --git a/config/samples/kustomization.yaml b/config/samples/kustomization.yaml
index de697f5e..232a7c84 100644
--- a/config/samples/kustomization.yaml
+++ b/config/samples/kustomization.yaml
@@ -1,6 +1,6 @@
 ## Append samples of your project ##
 resources:
-- operator_v1alpha1_externalsecrets.yaml
+- operator_v1alpha1_externalsecretsconfig.yaml
 - operator_v1alpha1_externalsecretsmanager.yaml
 - password.yaml
 - cluster_secretstore.yaml
diff --git a/config/samples/operator_v1alpha1_externalsecrets.yaml b/config/samples/operator_v1alpha1_externalsecretsconfig.yaml
similarity index 81%
rename from config/samples/operator_v1alpha1_externalsecrets.yaml
rename to config/samples/operator_v1alpha1_externalsecretsconfig.yaml
index 44067cb0..97c72d75 100644
--- a/config/samples/operator_v1alpha1_externalsecrets.yaml
+++ b/config/samples/operator_v1alpha1_externalsecretsconfig.yaml
@@ -1,5 +1,5 @@
 apiVersion: operator.openshift.io/v1alpha1
-kind: ExternalSecrets
+kind: ExternalSecretsConfig
 metadata:
   labels:
     app: external-secrets-operator
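
The renamed sample is only partially shown in this hunk. A hedged sketch of what a complete ExternalSecretsConfig sample could look like under the new API; the spec body below is assumed (field names taken from the API reference that follows), not taken from the patch:

```yaml
apiVersion: operator.openshift.io/v1alpha1
kind: ExternalSecretsConfig
metadata:
  labels:
    app: external-secrets-operator
  name: cluster        # the name must be `cluster`, as ExternalSecretsConfig is a singleton
spec:
  appConfig:
    logLevel: 1        # 1-5, per the Kubernetes logging guidelines linked in the API docs
```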
diff --git a/docs/api_reference.md b/docs/api_reference.md
index 6b8ae578..f576be8f 100644
--- a/docs/api_reference.md
+++ b/docs/api_reference.md
@@ -9,29 +9,51 @@
 Package v1alpha1 contains API Schema definitions for the operator v1alpha1 API group
 
 ### Resource Types
-- [ExternalSecrets](#externalsecrets)
-- [ExternalSecretsList](#externalsecretslist)
+- [ExternalSecretsConfig](#externalsecretsconfig)
+- [ExternalSecretsConfigList](#externalsecretsconfiglist)
 - [ExternalSecretsManager](#externalsecretsmanager)
 - [ExternalSecretsManagerList](#externalsecretsmanagerlist)
 
 
 
+#### ApplicationConfig
+
+
+
+ApplicationConfig is for specifying the configurations for the external-secrets operand.
+
+
+
+_Appears in:_
+- [ExternalSecretsConfigSpec](#externalsecretsconfigspec)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `operatingNamespace` _string_ | operatingNamespace is for restricting the external-secrets operations to the provided namespace.
When configured `ClusterSecretStore` and `ClusterExternalSecret` are implicitly disabled. | | MaxLength: 63
MinLength: 1
Optional: \{\}
| +| `webhookConfig` _[WebhookConfig](#webhookconfig)_ | webhookConfig is for configuring external-secrets webhook specifics. | | Optional: \{\}
| +| `logLevel` _integer_ | logLevel supports value range as per [Kubernetes logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). | 1 | Maximum: 5
Minimum: 1
Optional: \{\}
| +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | resources is for defining the resource requirements.
Cannot be updated.
ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | Optional: \{\}
| +| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | affinity is for setting scheduling affinity rules.
ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ | | Optional: \{\}
| +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | tolerations is for setting the pod tolerations.
ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
This field can have a maximum of 50 entries. | | MaxItems: 50
MinItems: 0
Optional: \{\}
| +| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is for defining the scheduling criteria using node labels.
ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
This field can have a maximum of 50 entries. | | MaxProperties: 50
MinProperties: 0
Optional: \{\}
| +| `proxy` _[ProxyConfig](#proxyconfig)_ | proxy is for setting the proxy configurations which will be made available in operand containers managed by the operator as environment variables. | | Optional: \{\}
| + + #### BitwardenSecretManagerProvider -BitwardenSecretManagerProvider is for enabling the bitwarden secrets manager provider and -for setting up the additional service required for connecting with the bitwarden server. +BitwardenSecretManagerProvider is for enabling the bitwarden secrets manager provider and for setting up the additional service required for connecting with the bitwarden server. _Appears in:_ -- [ExternalSecretsConfig](#externalsecretsconfig) +- [PluginsConfig](#pluginsconfig) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `enabled` _string_ | enabled is for enabling the bitwarden secrets manager provider, which can be indicated
by setting `true` or `false`. | false | Enum: [true false]
Optional: \{\}
| -| `secretRef` _SecretReference_ | SecretRef is the kubernetes secret containing the TLS key pair to be used for the bitwarden server.
The issuer in CertManagerConfig will be utilized to generate the required certificate if the secret
reference is not provided and CertManagerConfig is configured. The key names in secret for certificate
must be `tls.crt`, for private key must be `tls.key` and for CA certificate key name must be `ca.crt`. | | Optional: \{\}
| +| `mode` _[Mode](#mode)_ | mode indicates bitwarden secrets manager provider state, which can be indicated by setting Enabled or Disabled.
Enabled: Enables the Bitwarden provider plugin. The operator will ensure the plugin is deployed and its state is synchronized.
Disabled: Disables reconciliation of the Bitwarden provider plugin. The plugin and its resources will remain in their current state and will not be managed by the operator. | Disabled | Enum: [Enabled Disabled]
Optional: \{\}
| +| `secretRef` _SecretReference_ | SecretRef is the Kubernetes secret containing the TLS key pair to be used for the bitwarden server.
The issuer in CertManagerConfig will be utilized to generate the required certificate if the secret reference is not provided and CertManagerConfig is configured.
The key names in secret for certificate must be `tls.crt`, for private key must be `tls.key` and for CA certificate key name must be `ca.crt`. | | Optional: \{\}
| #### CertManagerConfig @@ -43,15 +65,53 @@ CertManagerConfig is for configuring cert-manager specifics. _Appears in:_ -- [ExternalSecretsConfig](#externalsecretsconfig) +- [CertProvidersConfig](#certprovidersconfig) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `enabled` _string_ | enabled is for enabling the use of cert-manager for obtaining and renewing the
certificates used for webhook server, instead of built-in certificates.
Use `true` or `false` to indicate the preference. | false | Enum: [true false]
Required: \{\}
| -| `addInjectorAnnotations` _string_ | addInjectorAnnotations is for adding the `cert-manager.io/inject-ca-from` annotation to the
webhooks and CRDs to automatically setup webhook to the cert-manager CA. This requires
CA Injector to be enabled in cert-manager. Use `true` or `false` to indicate the preference. | false | Enum: [true false]
Optional: \{\}
| -| `issuerRef` _ObjectReference_ | issuerRef contains details to the referenced object used for
obtaining the certificates. It must exist in the external-secrets
namespace if not using a cluster-scoped cert-manager issuer. | | Required: \{\}
| +| `mode` _[Mode](#mode)_ | mode indicates whether to use cert-manager for certificate management, instead of built-in cert-controller.
Enabled: Makes use of cert-manager for obtaining the certificates for webhook server and other components.
Disabled: Makes use of in-built cert-controller for obtaining the certificates for webhook server, which is the default behavior.
This field is immutable once set. | Disabled | Enum: [Enabled Disabled]
Required: \{\}
| +| `injectAnnotations` _string_ | injectAnnotations is for adding the `cert-manager.io/inject-ca-from` annotation to the webhooks and CRDs to automatically setup webhook to use the cert-manager CA. This requires CA Injector to be enabled in cert-manager.
Use `true` or `false` to indicate the preference. This field is immutable once set. | false | Enum: [true false]
Optional: \{\}
| +| `issuerRef` _ObjectReference_ | issuerRef contains details of the referenced object used for obtaining certificates.
When `issuerRef.Kind` is `Issuer`, it must exist in the `external-secrets` namespace.
This field is immutable once set. | | Optional: \{\}
| | `certificateDuration` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#duration-v1-meta)_ | certificateDuration is the validity period of the webhook certificate. | 8760h | Optional: \{\}
| -| `certificateRenewBefore` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#duration-v1-meta)_ | certificateRenewBefore is the ahead time to renew the webhook certificate
before expiry. | 30m | Optional: \{\}
| +| `certificateRenewBefore` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#duration-v1-meta)_ | certificateRenewBefore is the ahead time to renew the webhook certificate before expiry. | 30m | Optional: \{\}
| + + +#### CertProvidersConfig + + + +CertProvidersConfig defines the configuration for certificate providers used to manage TLS certificates for webhook and plugins. + + + +_Appears in:_ +- [ControllerConfig](#controllerconfig) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `certManager` _[CertManagerConfig](#certmanagerconfig)_ | certManager is for configuring cert-manager provider specifics. | | Optional: \{\}
| + + +#### CommonConfigs + + + +CommonConfigs are the common configurations available for all the operands managed by the operator. + + + +_Appears in:_ +- [ApplicationConfig](#applicationconfig) +- [GlobalConfig](#globalconfig) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `logLevel` _integer_ | logLevel supports value range as per [Kubernetes logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). | 1 | Maximum: 5
Minimum: 1
Optional: \{\}
| +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | resources is for defining the resource requirements.
Cannot be updated.
ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | Optional: \{\}
| +| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | affinity is for setting scheduling affinity rules.
ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ | | Optional: \{\}
| +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | tolerations is for setting the pod tolerations.
ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
This field can have a maximum of 50 entries. | | MaxItems: 50
MinItems: 0
Optional: \{\}
| +| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is for defining the scheduling criteria using node labels.
ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
This field can have a maximum of 50 entries. | | MaxProperties: 50
MinProperties: 0
Optional: \{\}
| +| `proxy` _[ProxyConfig](#proxyconfig)_ | proxy is for setting the proxy configurations which will be made available in operand containers managed by the operator as environment variables. | | Optional: \{\}
| #### Condition @@ -76,12 +136,12 @@ _Appears in:_ - +ConditionalStatus holds information of the current state of the external-secrets deployment indicated through defined conditions. _Appears in:_ -- [ExternalSecretsStatus](#externalsecretsstatus) +- [ExternalSecretsConfigStatus](#externalsecretsconfigstatus) | Field | Description | Default | Validation | | --- | --- | --- | --- | @@ -92,18 +152,18 @@ _Appears in:_ -ControllerConfig is for configuring the operator for setting up -defaults to install external-secrets. +ControllerConfig is for specifying the configurations for the controller to use while installing the `external-secrets` operand and the plugins. _Appears in:_ -- [ExternalSecretsSpec](#externalsecretsspec) +- [ExternalSecretsConfigSpec](#externalsecretsconfigspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `namespace` _string_ | namespace is for configuring the namespace to install the external-secret operand. | external-secrets | Optional: \{\}
| -| `labels` _object (keys:string, values:string)_ | labels to apply to all resources created for external-secrets deployment. | | Optional: \{\}
| +| `certProvider` _[CertProvidersConfig](#certprovidersconfig)_ | certProvider is for defining the configuration for certificate providers used to manage TLS certificates for webhook and plugins. | | Optional: \{\}
| +| `labels` _object (keys:string, values:string)_ | labels to apply to all resources created for the external-secrets operand deployment.
This field can have a maximum of 20 entries. | | MaxProperties: 20
MinProperties: 0
Optional: \{\}
| +| `periodicReconcileInterval` _integer_ | periodicReconcileInterval specifies the time interval in seconds for periodic reconciliation by the operator.
This controls how often the operator checks resources created for external-secrets operand to ensure they remain in desired state.
Interval can have value between 120-18000 seconds (2 minutes to 5 hours). Defaults to 300 seconds (5 minutes) if not specified. | 300 | Maximum: 18000
Minimum: 120
Optional: \{\}
| #### ControllerStatus @@ -124,84 +184,92 @@ _Appears in:_ | `observedGeneration` _integer_ | observedGeneration represents the .metadata.generation on the observed resource. | | Minimum: 0
| -#### ExternalSecrets +#### ExternalSecretsConfig -ExternalSecrets describes configuration and information about the managed external-secrets -deployment. The name must be `cluster` as ExternalSecrets is a singleton, -allowing only one instance per cluster. +ExternalSecretsConfig describes configuration and information about the managed external-secrets deployment. +The name must be `cluster` as ExternalSecretsConfig is a singleton, allowing only one instance per cluster. -When an ExternalSecrets is created, a new deployment is created which manages the -external-secrets and keeps it in the desired state. +When an ExternalSecretsConfig is created, the controller installs the external-secrets and keeps it in the desired state. _Appears in:_ -- [ExternalSecretsList](#externalsecretslist) +- [ExternalSecretsConfigList](#externalsecretsconfiglist) | Field | Description | Default | Validation | | --- | --- | --- | --- | | `apiVersion` _string_ | `operator.openshift.io/v1alpha1` | | | -| `kind` _string_ | `ExternalSecrets` | | | +| `kind` _string_ | `ExternalSecretsConfig` | | | | `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | -| `spec` _[ExternalSecretsSpec](#externalsecretsspec)_ | spec is the specification of the desired behavior of the ExternalSecrets. | | | -| `status` _[ExternalSecretsStatus](#externalsecretsstatus)_ | status is the most recently observed status of the ExternalSecrets. | | | +| `spec` _[ExternalSecretsConfigSpec](#externalsecretsconfigspec)_ | spec is the specification of the desired behavior of the ExternalSecretsConfig. | | | +| `status` _[ExternalSecretsConfigStatus](#externalsecretsconfigstatus)_ | status is the most recently observed status of the ExternalSecretsConfig. | | | -#### ExternalSecretsConfig +#### ExternalSecretsConfigList -ExternalSecretsConfig is for configuring the external-secrets behavior. +ExternalSecretsConfigList is a list of ExternalSecretsConfig objects. + -_Appears in:_ -- [ExternalSecretsSpec](#externalsecretsspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `logLevel` _integer_ | logLevel supports value range as per [kubernetes logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). | 1 | Maximum: 5
Minimum: 1
Optional: \{\}
| -| `operatingNamespace` _string_ | operatingNamespace is for restricting the external-secrets operations to provided namespace.
And when enabled `ClusterSecretStore` and `ClusterExternalSecret` are implicitly disabled. | | Optional: \{\}
| -| `bitwardenSecretManagerProvider` _[BitwardenSecretManagerProvider](#bitwardensecretmanagerprovider)_ | bitwardenSecretManagerProvider is for enabling the bitwarden secrets manager provider and
for setting up the additional service required for connecting with the bitwarden server. | | Optional: \{\}
| -| `webhookConfig` _[WebhookConfig](#webhookconfig)_ | webhookConfig is for configuring external-secrets webhook specifics. | | | -| `certManagerConfig` _[CertManagerConfig](#certmanagerconfig)_ | CertManagerConfig is for configuring cert-manager specifics, which will be used for generating
certificates for webhook and bitwarden-sdk-server components. | | Optional: \{\}
| -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | resources is for defining the resource requirements.
Cannot be updated.
ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | Optional: \{\}
| -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | affinity is for setting scheduling affinity rules.
ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ | | Optional: \{\}
| -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | tolerations is for setting the pod tolerations.
ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | Optional: \{\}
| -| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is for defining the scheduling criteria using node labels.
ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | | Optional: \{\}
| +| `apiVersion` _string_ | `operator.openshift.io/v1alpha1` | | | +| `kind` _string_ | `ExternalSecretsConfigList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[ExternalSecretsConfig](#externalsecretsconfig) array_ | | | | -#### ExternalSecretsList +#### ExternalSecretsConfigSpec -ExternalSecretsList is a list of ExternalSecrets objects. +ExternalSecretsConfigSpec is for configuring the external-secrets operand behavior. +_Appears in:_ +- [ExternalSecretsConfig](#externalsecretsconfig) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `appConfig` _[ApplicationConfig](#applicationconfig)_ | appConfig is for specifying the configurations for the `external-secrets` operand. | | Optional: \{\}
| +| `plugins` _[PluginsConfig](#pluginsconfig)_ | plugins is for configuring the optional provider plugins. | | Optional: \{\}
| +| `controllerConfig` _[ControllerConfig](#controllerconfig)_ | controllerConfig is for specifying the configurations for the controller to use while installing the `external-secrets` operand and the plugins. | | Optional: \{\}
| + + +#### ExternalSecretsConfigStatus + + + +ExternalSecretsConfigStatus is the most recently observed status of the ExternalSecretsConfig. + +_Appears in:_ +- [ExternalSecretsConfig](#externalsecretsconfig) + | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `apiVersion` _string_ | `operator.openshift.io/v1alpha1` | | | -| `kind` _string_ | `ExternalSecretsList` | | | -| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | -| `items` _[ExternalSecrets](#externalsecrets) array_ | | | | +| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | conditions holds information of the current state of deployment. | | | +| `externalSecretsImage` _string_ | externalSecretsImage is the name of the image and the tag used for deploying external-secrets. | | | +| `bitwardenSDKServerImage` _string_ | BitwardenSDKServerImage is the name of the image and the tag used for deploying bitwarden-sdk-server. | | | #### ExternalSecretsManager -ExternalSecretsManager describes configuration and information about the deployments managed by -the external-secrets-operator. The name must be `cluster` as this is a singleton object allowing -only one instance of ExternalSecretsManager per cluster. +ExternalSecretsManager describes configuration and information about the deployments managed by the external-secrets-operator. +The name must be `cluster` as this is a singleton object allowing only one instance of ExternalSecretsManager per cluster. -It is mainly for configuring the global options and enabling optional features, which -serves as a common/centralized config for managing multiple controllers of the operator. The object -is automatically created during the operator installation. +It is mainly for configuring the global options and enabling optional features, which serves as a common/centralized config for managing multiple controllers of the operator. +The object is automatically created during the operator installation. @@ -214,7 +282,7 @@ _Appears in:_ | `kind` _string_ | `ExternalSecretsManager` | | | | `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | | `spec` _[ExternalSecretsManagerSpec](#externalsecretsmanagerspec)_ | spec is the specification of the desired behavior | | | -| `status` _[ExternalSecretsManagerStatus](#externalsecretsmanagerstatus)_ | status is the most recently observed status of controllers used by
External Secrets Operator. | | | +| `status` _[ExternalSecretsManagerStatus](#externalsecretsmanagerstatus)_ | status is the most recently observed status of controllers used by External Secrets Operator. | | | #### ExternalSecretsManagerList @@ -248,8 +316,8 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `globalConfig` _[GlobalConfig](#globalconfig)_ | globalConfig is for configuring the behavior of deployments that are managed
by external secrets-operator. | | Optional: \{\}
| -| `features` _[Feature](#feature) array_ | features is for enabling the optional operator features. | | Optional: \{\}
| +| `globalConfig` _[GlobalConfig](#globalconfig)_ | globalConfig is for configuring the behavior of deployments that are managed by external secrets-operator. | | Optional: \{\}
| +| `optionalFeatures` _[Feature](#feature) array_ | optionalFeatures is for enabling the optional operator features. | | Optional: \{\}
| #### ExternalSecretsManagerStatus @@ -269,103 +337,126 @@ _Appears in:_ | `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | lastTransitionTime is the last time the condition transitioned from one status to another. | | Format: date-time
Type: string
| -#### ExternalSecretsSpec +#### Feature -ExternalSecretsSpec is the specification of the desired behavior of the ExternalSecrets. +Feature is for enabling the optional features. _Appears in:_ -- [ExternalSecrets](#externalsecrets) +- [ExternalSecretsManagerSpec](#externalsecretsmanagerspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `externalSecretsConfig` _[ExternalSecretsConfig](#externalsecretsconfig)_ | externalSecretsConfig is for configuring the external-secrets behavior. | | Optional: \{\}
| -| `controllerConfig` _[ControllerConfig](#controllerconfig)_ | controllerConfig is for configuring the controller for setting up
defaults to enable external-secrets. | | Optional: \{\}
| +| `name` _string_ | name of the optional feature. There are no optional features currently supported. | | Enum: []
Required: \{\}
| +| `mode` _[Mode](#mode)_ | mode indicates the feature state.
Use Enabled or Disabled to indicate the preference.
Enabled: Enables the optional feature and creates resources if required.
Disabled: Disables the optional feature, but will not remove any resources created. | | Enum: [Enabled Disabled]
Required: \{\}
| -#### ExternalSecretsStatus +#### GlobalConfig -ExternalSecretsStatus is the most recently observed status of the ExternalSecrets. +GlobalConfig is for configuring the external-secrets-operator behavior. _Appears in:_ -- [ExternalSecrets](#externalsecrets) +- [ExternalSecretsManagerSpec](#externalsecretsmanagerspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#condition-v1-meta) array_ | conditions holds information of the current state of deployment. | | | -| `externalSecretsImage` _string_ | externalSecretsImage is the name of the image and the tag used for deploying external-secrets. | | | +| `labels` _object (keys:string, values:string)_ | labels to apply to all resources created by the operator.
This field can have a maximum of 20 entries. | | MaxProperties: 20
MinProperties: 0
Optional: \{\}
| +| `logLevel` _integer_ | logLevel supports value range as per [Kubernetes logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). | 1 | Maximum: 5
Minimum: 1
Optional: \{\}
| +| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | resources is for defining the resource requirements.
Cannot be updated.
ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | Optional: \{\}
| +| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | affinity is for setting scheduling affinity rules.
ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ | | Optional: \{\}
| +| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | tolerations is for setting the pod tolerations.
ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
This field can have a maximum of 50 entries. | | MaxItems: 50
MinItems: 0
Optional: \{\}
| +| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is for defining the scheduling criteria using node labels.
ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
This field can have a maximum of 50 entries. | | MaxProperties: 50
MinProperties: 0
Optional: \{\}
| +| `proxy` _[ProxyConfig](#proxyconfig)_ | proxy is for setting the proxy configurations which will be made available in operand containers managed by the operator as environment variables. | | Optional: \{\}
| -#### Feature +#### Mode +_Underlying type:_ _string_ +Mode indicates the operational state of the optional features. -Feature is for enabling the optional features. -Feature is for enabling the optional features. + + +_Appears in:_ +- [BitwardenSecretManagerProvider](#bitwardensecretmanagerprovider) +- [CertManagerConfig](#certmanagerconfig) +- [Feature](#feature) + +| Field | Description | +| --- | --- | +| `Enabled` | Enabled indicates the optional configuration is enabled.
| +| `Disabled` | Disabled indicates the optional configuration is disabled.
| +| `DisabledAndCleanup` | DisabledAndCleanup indicates the optional configuration is disabled and created resources are automatically removed.
| + + +#### ObjectReference + + + +ObjectReference is a reference to an object with a given name, kind and group. _Appears in:_ -- [ExternalSecretsManagerSpec](#externalsecretsmanagerspec) +- [CertManagerConfig](#certmanagerconfig) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `name` _string_ | name of the optional feature. | | Required: \{\}
| -| `enabled` _boolean_ | enabled determines if feature should be turned on. | | Required: \{\}
| +| `name` _string_ | Name of the resource being referred to. | | MaxLength: 253
MinLength: 1
Required: \{\}
| +| `kind` _string_ | Kind of the resource being referred to. | | MaxLength: 253
MinLength: 1
Optional: \{\}
| +| `group` _string_ | Group of the resource being referred to. | | MaxLength: 253
MinLength: 1
Optional: \{\}
| -#### GlobalConfig +#### PluginsConfig -GlobalConfig is for configuring the external-secrets-operator behavior. +PluginsConfig is for configuring the optional plugins. _Appears in:_ -- [ExternalSecretsManagerSpec](#externalsecretsmanagerspec) +- [ExternalSecretsConfigSpec](#externalsecretsconfigspec) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `logLevel` _integer_ | logLevel supports value range as per [kubernetes logging guidelines](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use). | 1 | Maximum: 5
Minimum: 1
Optional: \{\}
| -| `resources` _[ResourceRequirements](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#resourcerequirements-v1-core)_ | resources is for defining the resource requirements.
Cannot be updated.
ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/ | | Optional: \{\}
| -| `affinity` _[Affinity](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#affinity-v1-core)_ | affinity is for setting scheduling affinity rules.
ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ | | Optional: \{\}
| -| `tolerations` _[Toleration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#toleration-v1-core) array_ | tolerations is for setting the pod tolerations.
ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ | | Optional: \{\}
| -| `nodeSelector` _object (keys:string, values:string)_ | nodeSelector is for defining the scheduling criteria using node labels.
ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ | | Optional: \{\}
| -| `labels` _object (keys:string, values:string)_ | labels to apply to all resources created for external-secrets deployment. | | Optional: \{\}
| +| `bitwardenSecretManagerProvider` _[BitwardenSecretManagerProvider](#bitwardensecretmanagerprovider)_ | bitwardenSecretManagerProvider is for enabling the bitwarden secrets manager provider plugin for connecting with the bitwarden secrets manager. | | Optional: \{\}
| -#### ObjectReference +#### ProxyConfig -ObjectReference is a reference to an object with a given name, kind and group. +ProxyConfig is for setting the proxy configurations which will be made available in operand containers managed by the operator as environment variables. _Appears in:_ -- [CertManagerConfig](#certmanagerconfig) +- [ApplicationConfig](#applicationconfig) +- [CommonConfigs](#commonconfigs) +- [GlobalConfig](#globalconfig) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `name` _string_ | Name of the resource being referred to. | | Required: \{\}
| -| `kind` _string_ | Kind of the resource being referred to. | | Optional: \{\}
| -| `group` _string_ | Group of the resource being referred to. | | Optional: \{\}
| +| `httpProxy` _string_ | httpProxy is the URL of the proxy for HTTP requests.
This field can have a maximum of 2048 characters. | | MaxLength: 2048
MinLength: 0
Optional: \{\}
| +| `httpsProxy` _string_ | httpsProxy is the URL of the proxy for HTTPS requests.
This field can have a maximum of 2048 characters. | | MaxLength: 2048
MinLength: 0
Optional: \{\}
| +| `noProxy` _string_ | noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used.
This field can have a maximum of 4096 characters. | | MaxLength: 4096
MinLength: 0
Optional: \{\}
| + + #### SecretReference -SecretReference is a reference to the secret with the given name, which should exist -in the same namespace where it will be utilized. +SecretReference is a reference to the secret with the given name, which should exist in the same namespace where it will be utilized. @@ -374,7 +465,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `name` _string_ | Name of the secret resource being referred to. | | Required: \{\}
| +| `name` _string_ | Name of the secret resource being referred to. | | MaxLength: 253
MinLength: 1
Required: \{\}
| #### WebhookConfig @@ -386,10 +477,10 @@ WebhookConfig is for configuring external-secrets webhook specifics. _Appears in:_ -- [ExternalSecretsConfig](#externalsecretsconfig) +- [ApplicationConfig](#applicationconfig) | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `certificateCheckInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#duration-v1-meta)_ | CertificateCheckInterval is for configuring the polling interval to check the certificate
validity. | 5m | Optional: \{\}
| +| `certificateCheckInterval` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#duration-v1-meta)_ | CertificateCheckInterval is for configuring the polling interval to check the certificate validity. | 5m | Optional: \{\}
| diff --git a/pkg/controller/common/constants.go b/pkg/controller/common/constants.go index f086e713..6e6fd526 100644 --- a/pkg/controller/common/constants.go +++ b/pkg/controller/common/constants.go @@ -9,10 +9,10 @@ const ( // DefaultRequeueTime is the default reconcile requeue time. DefaultRequeueTime = time.Second * 30 - // ExternalSecretsObjectName is the default name of the externalsecrets.openshift.operator.io CR. - ExternalSecretsObjectName = "cluster" + // ExternalSecretsConfigObjectName is the default name of the externalsecretsconfigs.operator.openshift.io CR. + ExternalSecretsConfigObjectName = "cluster" - // ExternalSecretsManagerObjectName is the default name of the externalsecretsmanager.openshift.operator.io CR. + // ExternalSecretsManagerObjectName is the default name of the externalsecretsmanagers.operator.openshift.io CR. ExternalSecretsManagerObjectName = "cluster" // CertManagerInjectCAFromAnnotation is the annotation key added to external-secrets resource once diff --git a/pkg/controller/common/utils.go b/pkg/controller/common/utils.go index 1f3886a3..bc9405fa 100644 --- a/pkg/controller/common/utils.go +++ b/pkg/controller/common/utils.go @@ -381,16 +381,23 @@ func ParseBool(val string) bool { return val == "true" } +// EvalMode is for evaluating the Mode values and return a boolean. This is very specific to the values +// read from CR which allows only `Enabled`, `Disabled` or `DisabledAndCleanup` as values. Returns +// true when has `Enabled` and false for every other value. +func EvalMode(val operatorv1alpha1.Mode) bool { + return val == operatorv1alpha1.Enabled +} + // IsESMSpecEmpty returns whether ExternalSecretsManager CR Spec is empty. func IsESMSpecEmpty(esm *operatorv1alpha1.ExternalSecretsManager) bool { return esm != nil && !reflect.DeepEqual(esm.Spec, operatorv1alpha1.ExternalSecretsManagerSpec{}) } // IsInjectCertManagerAnnotationEnabled is for check if add cert-manager annotation is enabled. -func IsInjectCertManagerAnnotationEnabled(es *operatorv1alpha1.ExternalSecrets) bool { - return es.Spec.ExternalSecretsConfig != nil && - es.Spec.ExternalSecretsConfig.CertManagerConfig != nil && - ParseBool(es.Spec.ExternalSecretsConfig.CertManagerConfig.AddInjectorAnnotations) +func IsInjectCertManagerAnnotationEnabled(esc *operatorv1alpha1.ExternalSecretsConfig) bool { + return esc.Spec.ControllerConfig.CertProvider != nil && + esc.Spec.ControllerConfig.CertProvider.CertManager != nil && + ParseBool(esc.Spec.ControllerConfig.CertProvider.CertManager.InjectAnnotations) } // AddFinalizer adds finalizer to the passed resource object. 
@@ -412,8 +419,8 @@ func AddFinalizer(ctx context.Context, obj client.Object, opClient operatorclien return fmt.Errorf("failed to fetch %q after updating finalizers: %w", namespacedName, err) } updated.DeepCopyInto(o) - case *operatorv1alpha1.ExternalSecrets: - updated := &operatorv1alpha1.ExternalSecrets{} + case *operatorv1alpha1.ExternalSecretsConfig: + updated := &operatorv1alpha1.ExternalSecretsConfig{} if err := opClient.Get(ctx, namespacedName, updated); err != nil { return fmt.Errorf("failed to fetch %q after updating finalizers: %w", namespacedName, err) } @@ -431,11 +438,11 @@ func RemoveFinalizer(ctx context.Context, obj client.Object, opClient operatorcl namespacedName := types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()} if controllerutil.ContainsFinalizer(obj, finalizer) { if !controllerutil.RemoveFinalizer(obj, finalizer) { - return fmt.Errorf("failed to create %q externalsecrets.openshift.operator.io object with finalizers removed", namespacedName) + return fmt.Errorf("failed to update %q externalsecretsconfigs.operator.openshift.io object with finalizers removed", namespacedName) } if err := opClient.UpdateWithRetry(ctx, obj); err != nil { - return fmt.Errorf("failed to remove finalizers on %q externalsecrets.openshift.operator.io with %w", namespacedName, err) + return fmt.Errorf("failed to remove finalizers on %q externalsecretsconfigs.operator.openshift.io with %w", namespacedName, err) } return nil } @@ -455,7 +462,7 @@ func (n *Now) Do(f func()) { } } -// Reset is for allowing Do to call the func f again. +// Reset is for allowing the Do method to call the func f again. func (n *Now) Reset() { n.Lock() defer n.Unlock() diff --git a/pkg/controller/commontest/utils.go b/pkg/controller/commontest/utils.go index dc62c8e2..f26be67c 100644 --- a/pkg/controller/commontest/utils.go +++ b/pkg/controller/commontest/utils.go @@ -9,8 +9,8 @@ import ( ) const ( - // TestExternalSecretsResourceName is the name for ExternalSecrets test CR. - TestExternalSecretsResourceName = "cluster" + // TestExternalSecretsConfigResourceName is the name for ExternalSecretsConfig test CR. + TestExternalSecretsConfigResourceName = "cluster" // TestExternalSecretsImageName is the sample image name for external-secrets operand. TestExternalSecretsImageName = "registry.redhat.io/external-secrets-operator/external-secrets-operator-rhel9" @@ -19,7 +19,7 @@ const ( TestBitwardenImageName = "registry.stage.redhat.io/external-secrets-operator/bitwarden-sdk-server-rhel9" // TestExternalSecretsNamespace is the sample namespace name for external-secrets deployment. - TestExternalSecretsNamespace = "test-external-secrets" + TestExternalSecretsNamespace = "external-secrets" // TestCRDName can be used for sample CRD resources. TestCRDName = "test-crd" @@ -30,11 +30,11 @@ var ( TestClientError = fmt.Errorf("test client error") ) -// TestExternalSecrets returns a sample ExternalSecrets object. -func TestExternalSecrets() *operatorv1alpha1.ExternalSecrets { - return &operatorv1alpha1.ExternalSecrets{ +// TestExternalSecretsConfig returns a sample ExternalSecretsConfig object. 
+func TestExternalSecretsConfig() *operatorv1alpha1.ExternalSecretsConfig { + return &operatorv1alpha1.ExternalSecretsConfig{ ObjectMeta: metav1.ObjectMeta{ - Name: TestExternalSecretsResourceName, + Name: TestExternalSecretsConfigResourceName, }, } } @@ -43,7 +43,7 @@ func TestExternalSecrets() *operatorv1alpha1.ExternalSecrets { func TestExternalSecretsManager() *operatorv1alpha1.ExternalSecretsManager { return &operatorv1alpha1.ExternalSecretsManager{ ObjectMeta: metav1.ObjectMeta{ - Name: TestExternalSecretsResourceName, + Name: TestExternalSecretsConfigResourceName, }, } } diff --git a/pkg/controller/crd_annotator/controller.go b/pkg/controller/crd_annotator/controller.go index e711f815..2231651f 100644 --- a/pkg/controller/crd_annotator/controller.go +++ b/pkg/controller/crd_annotator/controller.go @@ -106,7 +106,7 @@ func BuildCustomClient(mgr ctrl.Manager) (client.Client, error) { &crdv1.CustomResourceDefinition{}: { Label: managedResourceLabelReqSelector, }, - &operatorv1alpha1.ExternalSecrets{}: {}, + &operatorv1alpha1.ExternalSecretsConfig{}: {}, }, ReaderFailOnMissingInformer: true, } @@ -117,7 +117,7 @@ func BuildCustomClient(mgr ctrl.Manager) (client.Client, error) { if _, err = customCache.GetInformer(context.Background(), &crdv1.CustomResourceDefinition{}); err != nil { return nil, err } - if _, err = customCache.GetInformer(context.Background(), &operatorv1alpha1.ExternalSecrets{}); err != nil { + if _, err = customCache.GetInformer(context.Background(), &operatorv1alpha1.ExternalSecretsConfig{}); err != nil { return nil, err } @@ -153,8 +153,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { objName = obj.GetName() } } - if obj.GetObjectKind().GroupVersionKind().GroupKind().String() == - (&operatorv1alpha1.ExternalSecrets{}).GetObjectKind().GroupVersionKind().GroupKind().String() { + if _, ok := obj.(*operatorv1alpha1.ExternalSecretsConfig); ok { objName = reconcileObjectIdentifier } if objName != "" { @@ -180,7 +179,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). Named(ControllerName). WatchesMetadata(&crdv1.CustomResourceDefinition{}, handler.EnqueueRequestsFromMapFunc(mapFunc), managedResourcePredicate). - Watches(&operatorv1alpha1.ExternalSecrets{}, handler.EnqueueRequestsFromMapFunc(mapFunc), builder.WithPredicates(predicate.GenerationChangedPredicate{})). + Watches(&operatorv1alpha1.ExternalSecretsConfig{}, handler.EnqueueRequestsFromMapFunc(mapFunc), builder.WithPredicates(predicate.GenerationChangedPredicate{})). Complete(r) } @@ -189,30 +188,30 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.log.V(1).Info("reconciling", "request", req) - // Fetch the externalsecrets.openshift.operator.io CR - es := &operatorv1alpha1.ExternalSecrets{} + // Fetch the externalsecretsconfigs.operator.openshift.io CR + esc := &operatorv1alpha1.ExternalSecretsConfig{} key := types.NamespacedName{ - Name: common.ExternalSecretsObjectName, + Name: common.ExternalSecretsConfigObjectName, } - if err := r.Get(ctx, key, es); err != nil { + if err := r.Get(ctx, key, esc); err != nil { if errors.IsNotFound(err) { // NotFound errors, would mean the object hasn't been created yet and // not required to reconcile yet. 
- r.log.V(1).Info("externalsecrets.openshift.operator.io object not found, skipping reconciliation", "key", key) + r.log.V(1).Info("externalsecretsconfigs.operator.openshift.io object not found, skipping reconciliation", "key", key) return ctrl.Result{}, nil } - return ctrl.Result{}, fmt.Errorf("failed to fetch externalsecrets.openshift.operator.io %q during reconciliation: %w", key, err) + return ctrl.Result{}, fmt.Errorf("failed to fetch externalsecretsconfigs.operator.openshift.io %q during reconciliation: %w", key, err) } - if common.IsInjectCertManagerAnnotationEnabled(es) { - return r.processReconcileRequest(es, req.NamespacedName) + if common.IsInjectCertManagerAnnotationEnabled(esc) { + return r.processReconcileRequest(esc, req.NamespacedName) } return ctrl.Result{}, nil } // processReconcileRequest is the reconciliation handler to manage the resources. -func (r *Reconciler) processReconcileRequest(es *operatorv1alpha1.ExternalSecrets, req types.NamespacedName) (ctrl.Result, error) { +func (r *Reconciler) processReconcileRequest(esc *operatorv1alpha1.ExternalSecretsConfig, req types.NamespacedName) (ctrl.Result, error) { var oErr error = nil if req.Name == reconcileObjectIdentifier { if err := r.updateAnnotationsInAllCRDs(); err != nil { @@ -234,7 +233,7 @@ func (r *Reconciler) processReconcileRequest(es *operatorv1alpha1.ExternalSecret } } - if err := r.updateCondition(es, oErr); err != nil { + if err := r.updateCondition(esc, oErr); err != nil { return ctrl.Result{}, utilerrors.NewAggregate([]error{err, oErr}) } @@ -278,10 +277,10 @@ func (r *Reconciler) updateAnnotationsInAllCRDs() error { return nil } -func (r *Reconciler) updateCondition(es *operatorv1alpha1.ExternalSecrets, err error) error { +func (r *Reconciler) updateCondition(esc *operatorv1alpha1.ExternalSecretsConfig, err error) error { cond := metav1.Condition{ Type: operatorv1alpha1.UpdateAnnotation, - ObservedGeneration: es.GetGeneration(), + ObservedGeneration: esc.GetGeneration(), } if err != nil { @@ -294,26 +293,26 @@ func (r *Reconciler) updateCondition(es *operatorv1alpha1.ExternalSecrets, err e cond.Message = "successfully updated annotations" } - if apimeta.SetStatusCondition(&es.Status.Conditions, cond) { - return r.updateStatus(r.ctx, es) + if apimeta.SetStatusCondition(&esc.Status.Conditions, cond) { + return r.updateStatus(r.ctx, esc) } return nil } -// updateStatus is for updating the status subresource of externalsecrets.openshift.operator.io. -func (r *Reconciler) updateStatus(ctx context.Context, changed *operatorv1alpha1.ExternalSecrets) error { +// updateStatus is for updating the status subresource of externalsecretsconfigs.operator.openshift.io. 
+func (r *Reconciler) updateStatus(ctx context.Context, changed *operatorv1alpha1.ExternalSecretsConfig) error { namespacedName := types.NamespacedName{Name: changed.Name, Namespace: changed.Namespace} if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - r.log.V(4).Info("updating externalsecrets.openshift.operator.io status", "request", namespacedName) - current := &operatorv1alpha1.ExternalSecrets{} + r.log.V(4).Info("updating externalsecretsconfigs.operator.openshift.io status", "request", namespacedName) + current := &operatorv1alpha1.ExternalSecretsConfig{} if err := r.Get(ctx, namespacedName, current); err != nil { - return fmt.Errorf("failed to fetch externalsecrets.openshift.operator.io %q for status update: %w", namespacedName, err) + return fmt.Errorf("failed to fetch externalsecretsconfigs.operator.openshift.io %q for status update: %w", namespacedName, err) } changed.Status.DeepCopyInto(¤t.Status) if err := r.StatusUpdate(ctx, current); err != nil { - return fmt.Errorf("failed to update externalsecrets.openshift.operator.io %q status: %w", namespacedName, err) + return fmt.Errorf("failed to update externalsecretsconfigs.operator.openshift.io %q status: %w", namespacedName, err) } return nil diff --git a/pkg/controller/crd_annotator/controller_test.go b/pkg/controller/crd_annotator/controller_test.go index 983d844b..ef21f211 100644 --- a/pkg/controller/crd_annotator/controller_test.go +++ b/pkg/controller/crd_annotator/controller_test.go @@ -2,12 +2,12 @@ package crd_annotator import ( "context" - "k8s.io/apimachinery/pkg/runtime/schema" "testing" crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -28,12 +28,14 @@ func testReconciler(t *testing.T) *Reconciler { } } -// testExtendExternalSecrets enables CRD annotation specific configs on existing externalsecrets object. -func testExtendExternalSecrets(es *operatorv1alpha1.ExternalSecrets) { - es.Spec = operatorv1alpha1.ExternalSecretsSpec{ - ExternalSecretsConfig: &operatorv1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &operatorv1alpha1.CertManagerConfig{ - AddInjectorAnnotations: "true", +// testExtendExternalSecretsConfig enables CRD annotation specific configs on existing externalsecretsconfig object. 
+func testExtendExternalSecretsConfig(esc *operatorv1alpha1.ExternalSecretsConfig) {
+	esc.Spec = operatorv1alpha1.ExternalSecretsConfigSpec{
+		ControllerConfig: operatorv1alpha1.ControllerConfig{
+			CertProvider: &operatorv1alpha1.CertProvidersConfig{
+				CertManager: &operatorv1alpha1.CertManagerConfig{
+					InjectAnnotations: "true",
+				},
 			},
 		},
 	}
@@ -72,10 +74,10 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
-						es := commontest.TestExternalSecrets()
-						testExtendExternalSecrets(es)
-						es.DeepCopyInto(o)
+					case *operatorv1alpha1.ExternalSecretsConfig:
+						esc := commontest.TestExternalSecretsConfig()
+						testExtendExternalSecretsConfig(esc)
+						esc.DeepCopyInto(o)
 					case *crdv1.CustomResourceDefinition:
 						crd := testCRD()
 						crd.DeepCopyInto(o)
@@ -101,10 +103,10 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
-						es := commontest.TestExternalSecrets()
-						testExtendExternalSecrets(es)
-						es.DeepCopyInto(o)
+					case *operatorv1alpha1.ExternalSecretsConfig:
+						esc := commontest.TestExternalSecretsConfig()
+						testExtendExternalSecretsConfig(esc)
+						esc.DeepCopyInto(o)
 					case *crdv1.CustomResourceDefinition:
 						crd := testCRD()
 						crd.DeepCopyInto(o)
@@ -141,7 +143,7 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
+					case *operatorv1alpha1.ExternalSecretsConfig:
 						return commontest.TestClientError
 					case *crdv1.CustomResourceDefinition:
 						crd := testCRD()
@@ -157,10 +159,10 @@ func TestReconcile(t *testing.T) {
 					Reason: operatorv1alpha1.ReasonFailed,
 				},
 			},
-			wantErr: `failed to fetch externalsecrets.openshift.operator.io "/cluster" during reconciliation: test client error`,
+			wantErr: `failed to fetch externalsecretsconfigs.operator.openshift.io "/cluster" during reconciliation: test client error`,
 		},
 		{
-			name: "reconciliation successful externalsecrets does not exist",
+			name: "reconciliation successful externalsecretsconfigs does not exist",
 			request: ctrl.Request{
 				NamespacedName: types.NamespacedName{
 					Name: commontest.TestCRDName,
@@ -169,11 +171,11 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
+					case *operatorv1alpha1.ExternalSecretsConfig:
 						return errors.NewNotFound(schema.GroupResource{
 							Group:    operatorv1alpha1.GroupVersion.Group,
-							Resource: "externalsecrets",
-						}, commontest.TestExternalSecretsResourceName)
+							Resource: "externalsecretsconfigs",
+						}, commontest.TestExternalSecretsConfigResourceName)
 					case *crdv1.CustomResourceDefinition:
 						crd := testCRD()
 						crd.DeepCopyInto(o)
@@ -193,9 +195,9 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
-						es := commontest.TestExternalSecrets()
-						es.DeepCopyInto(o)
+					case *operatorv1alpha1.ExternalSecretsConfig:
+						esc := commontest.TestExternalSecretsConfig()
+						esc.DeepCopyInto(o)
 					case *crdv1.CustomResourceDefinition:
 						crd := testCRD()
 						crd.DeepCopyInto(o)
@@ -215,10 +217,10 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
-						es := commontest.TestExternalSecrets()
-						testExtendExternalSecrets(es)
-						es.DeepCopyInto(o)
+					case *operatorv1alpha1.ExternalSecretsConfig:
+						esc := commontest.TestExternalSecretsConfig()
+						testExtendExternalSecretsConfig(esc)
+						esc.DeepCopyInto(o)
 					case *crdv1.CustomResourceDefinition:
 						return commontest.TestClientError
 					}
@@ -247,10 +249,10 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
-						es := commontest.TestExternalSecrets()
-						testExtendExternalSecrets(es)
-						es.DeepCopyInto(o)
+					case *operatorv1alpha1.ExternalSecretsConfig:
+						esc := commontest.TestExternalSecretsConfig()
+						testExtendExternalSecretsConfig(esc)
+						esc.DeepCopyInto(o)
 					case *crdv1.CustomResourceDefinition:
 						return commontest.TestClientError
 					}
@@ -284,10 +286,10 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
-						es := commontest.TestExternalSecrets()
-						testExtendExternalSecrets(es)
-						es.DeepCopyInto(o)
+					case *operatorv1alpha1.ExternalSecretsConfig:
+						esc := commontest.TestExternalSecretsConfig()
+						testExtendExternalSecretsConfig(esc)
+						esc.DeepCopyInto(o)
 					case *crdv1.CustomResourceDefinition:
 						return commontest.TestClientError
 					}
@@ -313,10 +315,10 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
-						es := commontest.TestExternalSecrets()
-						testExtendExternalSecrets(es)
-						es.DeepCopyInto(o)
+					case *operatorv1alpha1.ExternalSecretsConfig:
+						esc := commontest.TestExternalSecretsConfig()
+						testExtendExternalSecretsConfig(esc)
+						esc.DeepCopyInto(o)
 					case *crdv1.CustomResourceDefinition:
 						return errors.NewNotFound(schema.GroupResource{
 							Group: crdv1.SchemeGroupVersion.Group,
@@ -344,10 +346,10 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
-						es := commontest.TestExternalSecrets()
-						testExtendExternalSecrets(es)
-						es.DeepCopyInto(o)
+					case *operatorv1alpha1.ExternalSecretsConfig:
+						esc := commontest.TestExternalSecretsConfig()
+						testExtendExternalSecretsConfig(esc)
+						esc.DeepCopyInto(o)
 					case *crdv1.CustomResourceDefinition:
 						crd := testCRD()
 						crd.DeepCopyInto(o)
@@ -377,10 +379,10 @@ func TestReconcile(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error {
 					switch o := obj.(type) {
-					case *operatorv1alpha1.ExternalSecrets:
-						es := commontest.TestExternalSecrets()
-						testExtendExternalSecrets(es)
-						es.DeepCopyInto(o)
+					case *operatorv1alpha1.ExternalSecretsConfig:
+						esc := commontest.TestExternalSecretsConfig()
+						testExtendExternalSecretsConfig(esc)
+						esc.DeepCopyInto(o)
 					case *crdv1.CustomResourceDefinition:
 						crd := testCRD()
 						crd.DeepCopyInto(o)
@@ -398,7 +400,7 @@ func TestReconcile(t *testing.T) {
 					Reason: operatorv1alpha1.ReasonCompleted,
 				},
 			},
-			wantErr: `failed to update externalsecrets.openshift.operator.io "/cluster" status: test client error`,
+			wantErr: `failed to update externalsecretsconfigs.operator.openshift.io "/cluster" status: test client error`,
 		},
 	}
 
@@ -415,12 +417,12 @@ func TestReconcile(t *testing.T) {
 			if (tt.wantErr != "" || err != nil) && (err == nil || err.Error() != tt.wantErr) {
 				t.Errorf("Reconcile() err: %v, wantErr: %v", err, tt.wantErr)
 			}
-			es := &operatorv1alpha1.ExternalSecrets{}
+			esc := &operatorv1alpha1.ExternalSecretsConfig{}
 			key := types.NamespacedName{
-				Name: common.ExternalSecretsObjectName,
+				Name: common.ExternalSecretsConfigObjectName,
 			}
-			r.CtrlClient.Get(r.ctx, key, es)
-			for _, c1 := range es.Status.Conditions {
+			_ = r.CtrlClient.Get(r.ctx, key, esc)
+			for _, c1 := range esc.Status.Conditions {
 				for _, c2 := range tt.expectedStatusCondition {
 					if c1.Type == c2.Type {
 						if c1.Status != c2.Status || c1.Reason != c2.Reason {
diff --git a/pkg/controller/external_secrets/certificate.go b/pkg/controller/external_secrets/certificate.go
index 359ebf39..a0a96bfd 100644
--- a/pkg/controller/external_secrets/certificate.go
+++ b/pkg/controller/external_secrets/certificate.go
@@ -2,6 +2,7 @@ package external_secrets
 
 import (
 	"fmt"
+	"reflect"
 	"strings"
 
 	corev1 "k8s.io/api/core/v1"
@@ -17,30 +18,34 @@ import (
 )
 
 var (
-	serviceExternalSecretWebhookName string = "external-secrets-webhook"
+	serviceExternalSecretWebhookName = "external-secrets-webhook"
 )
 
-func (r *Reconciler) createOrApplyCertificates(es *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string, recon bool) error {
-	if isCertManagerConfigEnabled(es) {
-		if err := r.createOrApplyCertificate(es, resourceLabels, webhookCertificateAssetName, recon); err != nil {
+func (r *Reconciler) createOrApplyCertificates(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string, recon bool) error {
+	if isCertManagerConfigEnabled(esc) {
+		if err := r.createOrApplyCertificate(esc, resourceLabels, webhookCertificateAssetName, recon); err != nil {
 			return err
 		}
 	}
 
-	if isBitwardenConfigEnabled(es) {
-		bitwardenConfig := es.Spec.ExternalSecretsConfig.BitwardenSecretManagerProvider
-		if bitwardenConfig.SecretRef.Name != "" {
-			return r.assertSecretRefExists(es, es.Spec.ExternalSecretsConfig.BitwardenSecretManagerProvider)
+	if isBitwardenConfigEnabled(esc) {
+		bitwardenConfig := esc.Spec.Plugins.BitwardenSecretManagerProvider
+		if bitwardenConfig.SecretRef != nil && bitwardenConfig.SecretRef.Name != "" {
+			return r.assertSecretRefExists(esc, esc.Spec.Plugins.BitwardenSecretManagerProvider)
 		}
-		if err := r.createOrApplyCertificate(es, resourceLabels, bitwardenCertificateAssetName, recon); err != nil {
+		if !isCertManagerConfigEnabled(esc) {
+			return common.NewIrrecoverableError(fmt.Errorf("invalid bitwardenSecretManagerProvider config"),
+				"either secretRef or certManagerConfig must be configured, when bitwardenSecretManagerProvider is enabled")
+		}
+		if err := r.createOrApplyCertificate(esc, resourceLabels, bitwardenCertificateAssetName, recon); err != nil {
 			return err
 		}
 	}
 
 	return nil
 }
 
-func (r *Reconciler) createOrApplyCertificate(es *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string, fileName string, recon bool) error {
-	desired, err := r.getCertificateObject(es, resourceLabels, fileName)
+func (r *Reconciler) createOrApplyCertificate(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string, fileName string, recon bool) error {
+	desired, err := r.getCertificateObject(esc, resourceLabels, fileName)
 	if err != nil {
 		return err
 	}
@@ -58,14 +63,14 @@ func (r *Reconciler) createOrApplyCertificate(es *operatorv1alpha1.ExternalSecre
 	}
 
 	if exist && recon {
-		r.eventRecorder.Eventf(es, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s certificate resource already exists, maybe from previous installation", certificateName)
+		r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s certificate resource already exists, maybe from previous installation", certificateName)
 	}
 
 	if exist && common.HasObjectChanged(desired, fetched) {
 		r.log.V(1).Info("certificate has been modified, updating to desired state", "name", certificateName)
 		if err := r.UpdateWithRetry(r.ctx, desired); err != nil {
 			return common.FromClientError(err, "failed to update %s certificate resource", certificateName)
 		}
-		r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "certificate resource %s reconciled back to desired state", certificateName)
+		r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "certificate resource %s reconciled back to desired state", certificateName)
 	} else {
 		r.log.V(4).Info("certificate resource already exists and is in expected state", "name", certificateName)
 	}
@@ -73,35 +78,37 @@ func (r *Reconciler) createOrApplyCertificate(es *operatorv1alpha1.ExternalSecre
 		if err := r.Create(r.ctx, desired); err != nil {
 			return common.FromClientError(err, "failed to create %s certificate resource", certificateName)
 		}
-		r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "certificate resource %s created", certificateName)
+		r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "certificate resource %s created", certificateName)
 	}
 
 	return nil
 }
 
-func (r *Reconciler) getCertificateObject(es *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string, fileName string) (*certmanagerv1.Certificate, error) {
+func (r *Reconciler) getCertificateObject(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string, fileName string) (*certmanagerv1.Certificate, error) {
 	certificate := common.DecodeCertificateObjBytes(assets.MustAsset(fileName))
 
-	updateNamespace(certificate, es)
+	updateNamespace(certificate, esc)
 	common.UpdateResourceLabels(certificate, resourceLabels)
 
-	if err := r.updateCertificateParams(es, certificate); err != nil {
-		return nil, common.NewIrrecoverableError(err, "failed to update certificate resource for %s/%s deployment", getNamespace(es), es.GetName())
+	if err := r.updateCertificateParams(esc, certificate); err != nil {
+		return nil, common.NewIrrecoverableError(err, "failed to update certificate resource for %s/%s deployment", getNamespace(esc), esc.GetName())
 	}
 
 	return certificate, nil
 }
 
-func (r *Reconciler) updateCertificateParams(es *operatorv1alpha1.ExternalSecrets, certificate *certmanagerv1.Certificate) error {
+func (r *Reconciler) updateCertificateParams(esc *operatorv1alpha1.ExternalSecretsConfig, certificate *certmanagerv1.Certificate) error {
 	certManageConfig := &operatorv1alpha1.CertManagerConfig{}
-	if es.Spec.ExternalSecretsConfig != nil && es.Spec.ExternalSecretsConfig.CertManagerConfig != nil {
-		certManageConfig = es.Spec.ExternalSecretsConfig.CertManagerConfig
+	if esc.Spec.ControllerConfig.CertProvider != nil && esc.Spec.ControllerConfig.CertProvider.CertManager != nil {
+		certManageConfig = esc.Spec.ControllerConfig.CertProvider.CertManager
+	}
+	if reflect.ValueOf(certManageConfig.IssuerRef).IsZero() {
+		return fmt.Errorf("certManageConfig is enabled without IssuerRef")
 	}
-	externalSecretsNamespace := getNamespace(es)
-
 	if certManageConfig.IssuerRef.Name == "" {
 		return fmt.Errorf("issuerRef.Name not present")
 	}
+	externalSecretsNamespace := getNamespace(esc)
 
 	certificate.Spec.IssuerRef = v1.ObjectReference{
 		Name: certManageConfig.IssuerRef.Name,
@@ -144,10 +151,10 @@ func (r *Reconciler) assertIssuerRefExists(issueRef v1.ObjectReference, namespac
 	return nil
 }
 
-func (r *Reconciler) assertSecretRefExists(es *operatorv1alpha1.ExternalSecrets, bitwardenConfig *operatorv1alpha1.BitwardenSecretManagerProvider) error {
+func (r *Reconciler) assertSecretRefExists(esc *operatorv1alpha1.ExternalSecretsConfig, bitwardenConfig *operatorv1alpha1.BitwardenSecretManagerProvider) error {
 	namespacedName := types.NamespacedName{
 		Name:      bitwardenConfig.SecretRef.Name,
-		Namespace: getNamespace(es),
+		Namespace: getNamespace(esc),
 	}
 
 	object := &corev1.Secret{}
@@ -172,11 +179,11 @@ func (r *Reconciler) getIssuer(issuerRef v1.ObjectReference, namespace string) (
 		object = &certmanagerv1.Issuer{}
 	}
 
-	if ifExists, err := r.UncachedClient.Exists(r.ctx, namespacedName, object); err != nil {
+	ifExists, err := r.UncachedClient.Exists(r.ctx, namespacedName, object)
+	if err != nil {
 		return ifExists, fmt.Errorf("failed to fetch %q issuer: %w", namespacedName, err)
-	} else {
-		return ifExists, nil
 	}
+	return ifExists, nil
 }
 
 func updateNamespaceForFQDN(fqdns []string, namespace string) []string {
diff --git a/pkg/controller/external_secrets/certificate_test.go b/pkg/controller/external_secrets/certificate_test.go
index 12883d64..5f3fcd0b 100644
--- a/pkg/controller/external_secrets/certificate_test.go
+++ b/pkg/controller/external_secrets/certificate_test.go
@@ -27,32 +27,26 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 	tests := []struct {
 		name    string
 		preReq  func(*Reconciler, *fakes.FakeCtrlClient)
-		es      func(*v1alpha1.ExternalSecrets)
+		esc     func(*v1alpha1.ExternalSecretsConfig)
 		recon   bool
 		wantErr string
 	}{
 		{
 			name:   "external secret spec disabled",
 			preReq: nil,
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec = v1alpha1.ExternalSecretsSpec{}
-			},
-			recon: false,
-		},
-		{
-			name:   "externalSecretConfig is nil",
-			preReq: nil,
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig = nil
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec = v1alpha1.ExternalSecretsConfigSpec{}
 			},
 			recon: false,
 		},
 		{
 			name:   "webhook config is nil",
 			preReq: nil,
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{
-					WebhookConfig: nil,
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec = v1alpha1.ExternalSecretsConfigSpec{
+					ApplicationConfig: v1alpha1.ApplicationConfig{
+						WebhookConfig: nil,
+					},
 				}
 			},
 			recon: false,
@@ -60,9 +54,11 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 		{
 			name:   "webhook config is empty",
 			preReq: nil,
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{
-					WebhookConfig: &v1alpha1.WebhookConfig{},
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec = v1alpha1.ExternalSecretsConfigSpec{
+					ApplicationConfig: v1alpha1.ApplicationConfig{
+						WebhookConfig: &v1alpha1.WebhookConfig{},
+					},
 				}
 			},
 			recon: false,
@@ -70,9 +66,13 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 		{
 			name:   "cert manager config is nil",
 			preReq: nil,
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{
-					CertManagerConfig: nil,
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec = v1alpha1.ExternalSecretsConfigSpec{
+					ControllerConfig: v1alpha1.ControllerConfig{
+						CertProvider: &v1alpha1.CertProvidersConfig{
+							CertManager: nil,
+						},
+					},
 				}
 			},
 			recon: false,
@@ -80,12 +80,13 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 		{
 			name:   "cert manager config enabled but issuerRef.Name is empty",
 			preReq: nil,
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.Enabled = "true"
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = ""
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec.ControllerConfig.CertProvider.CertManager.Mode = v1alpha1.Enabled
+				esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = ""
+				esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Kind = "Issuer"
 			},
 			recon:   false,
-			wantErr: fmt.Sprintf("failed to update certificate resource for %s/%s deployment: issuerRef.Name not present", commontest.TestExternalSecretsNamespace, testExternalSecretsForCertificate().GetName()),
+			wantErr: fmt.Sprintf("failed to update certificate resource for %s/%s deployment: issuerRef.Name not present", commontest.TestExternalSecretsNamespace, testExternalSecretsConfigForCertificate().GetName()),
 		},
 		{
 			name: "reconciliation of webhook certificate fails while checking if exists",
@@ -118,9 +119,9 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 					return fmt.Errorf("object not found: %s/%s", ns.Namespace, ns.Name)
 				})
 			},
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.Enabled = "true"
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec.ControllerConfig.CertProvider.CertManager.Mode = v1alpha1.Enabled
+				esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
 			},
 			recon:   false,
 			wantErr: fmt.Sprintf("failed to check %s/%s certificate resource already exists: %s", commontest.TestExternalSecretsNamespace, testValidateCertificateResourceName, commontest.TestClientError),
@@ -171,9 +172,9 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 					return nil
 				})
 			},
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.Enabled = "true"
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec.ControllerConfig.CertProvider.CertManager.Mode = v1alpha1.Enabled
+				esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
 			},
 			recon:   false,
 			wantErr: fmt.Sprintf("failed to update %s/%s certificate resource: %s", commontest.TestExternalSecretsNamespace, testValidateCertificateResourceName, commontest.TestClientError),
@@ -185,9 +186,9 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 				switch o := obj.(type) {
 				case *certmanagerv1.Certificate:
 					if ns.Name == serviceExternalSecretWebhookName {
-						es := testExternalSecretsForCertificate()
-						es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
-						desiredCert, _ := r.getCertificateObject(es, controllerDefaultResourceLabels, webhookCertificateAssetName)
+						esc := testExternalSecretsConfigForCertificate()
+						esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
+						desiredCert, _ := r.getCertificateObject(esc, controllerDefaultResourceLabels, webhookCertificateAssetName)
 						desiredCert.DeepCopyInto(o)
 						return nil
 					}
@@ -226,9 +227,9 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 					return nil
 				})
 			},
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.Enabled = "true"
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec.ControllerConfig.CertProvider.CertManager.Mode = v1alpha1.Enabled
+				esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
 			},
 			recon: false,
 		},
@@ -258,9 +259,9 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 					return fmt.Errorf("object not found")
 				})
 			},
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.Enabled = "true"
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec.ControllerConfig.CertProvider.CertManager.Mode = v1alpha1.Enabled
+				esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
 			},
 			recon:   false,
 			wantErr: fmt.Sprintf("failed to create %s/%s certificate resource: %s", commontest.TestExternalSecretsNamespace, testValidateCertificateResourceName, commontest.TestClientError),
@@ -292,9 +293,9 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 					return fmt.Errorf("object not found")
 				})
 			},
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.Enabled = "true"
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec.ControllerConfig.CertProvider.CertManager.Mode = v1alpha1.Enabled
+				esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
 			},
 			recon: false,
 		},
@@ -303,9 +304,9 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.ExistsCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) (bool, error) {
 					if ns.Name == serviceExternalSecretWebhookName {
-						es := testExternalSecretsForCertificate()
-						es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
-						desiredCert, _ := r.getCertificateObject(es, controllerDefaultResourceLabels, webhookCertificateAssetName)
+						esc := testExternalSecretsConfigForCertificate()
+						esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
+						desiredCert, _ := r.getCertificateObject(esc, controllerDefaultResourceLabels, webhookCertificateAssetName)
 						desiredCert.DeepCopyInto(obj.(*certmanagerv1.Certificate))
 						return true, nil
 					}
@@ -338,14 +339,14 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 					return nil
 				})
 			},
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.Enabled = "true"
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
-				es.Spec.ExternalSecretsConfig.BitwardenSecretManagerProvider = &v1alpha1.BitwardenSecretManagerProvider{
-					SecretRef: v1alpha1.SecretReference{
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec.ControllerConfig.CertProvider.CertManager.Mode = v1alpha1.Enabled
+				esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
+				esc.Spec.Plugins.BitwardenSecretManagerProvider = &v1alpha1.BitwardenSecretManagerProvider{
+					SecretRef: &v1alpha1.SecretReference{
 						Name: "bitwarden-secret",
 					},
-					Enabled: "true",
+					Mode: v1alpha1.Enabled,
 				}
 			},
 			recon: false,
@@ -356,9 +357,9 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 			preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) {
 				m.ExistsCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) (bool, error) {
 					if ns.Name == serviceExternalSecretWebhookName {
-						es := testExternalSecretsForCertificate()
-						es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
-						desiredCert, _ := r.getCertificateObject(es, controllerDefaultResourceLabels, webhookCertificateAssetName)
+						esc := testExternalSecretsConfigForCertificate()
+						esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
+						desiredCert, _ := r.getCertificateObject(esc, controllerDefaultResourceLabels, webhookCertificateAssetName)
 						desiredCert.DeepCopyInto(obj.(*certmanagerv1.Certificate))
 						return true, nil
 					}
@@ -386,14 +387,14 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 					return nil
 				})
 			},
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.Enabled = "true"
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
-				es.Spec.ExternalSecretsConfig.BitwardenSecretManagerProvider = &v1alpha1.BitwardenSecretManagerProvider{
-					SecretRef: v1alpha1.SecretReference{
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec.ControllerConfig.CertProvider.CertManager.Mode = v1alpha1.Enabled
+				esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
+				esc.Spec.Plugins.BitwardenSecretManagerProvider = &v1alpha1.BitwardenSecretManagerProvider{
+					SecretRef: &v1alpha1.SecretReference{
 						Name: "bitwarden-secret",
 					},
-					Enabled: "true",
+					Mode: v1alpha1.Enabled,
 				}
 			},
 			recon: false,
@@ -433,10 +434,10 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 					return fmt.Errorf("object not found")
 				})
 			},
-			es: func(es *v1alpha1.ExternalSecrets) {
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.Enabled = "true"
-				es.Spec.ExternalSecretsConfig.CertManagerConfig.IssuerRef.Name = "test-issuer"
-				es.Spec.ExternalSecretsConfig.BitwardenSecretManagerProvider = nil
+			esc: func(esc *v1alpha1.ExternalSecretsConfig) {
+				esc.Spec.ControllerConfig.CertProvider.CertManager.Mode = v1alpha1.Enabled
+				esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Name = "test-issuer"
+				esc.Spec.Plugins.BitwardenSecretManagerProvider = nil
 			},
 			recon: false,
 		},
@@ -452,12 +453,12 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 			r.CtrlClient = mock
 			r.UncachedClient = mock
 
-			es := testExternalSecretsForCertificate()
-			if tt.es != nil {
-				tt.es(es)
+			esc := testExternalSecretsConfigForCertificate()
+			if tt.esc != nil {
+				tt.esc(esc)
 			}
 
-			err := r.createOrApplyCertificates(es, controllerDefaultResourceLabels, tt.recon)
+			err := r.createOrApplyCertificates(esc, controllerDefaultResourceLabels, tt.recon)
 			if (tt.wantErr != "" || err != nil) && (err == nil || err.Error() != tt.wantErr) {
 				t.Errorf("createOrApplyCertificates() err: %v, wantErr: %v", err, tt.wantErr)
 			}
@@ -465,21 +466,22 @@ func TestCreateOrApplyCertificates(t *testing.T) {
 	}
 }
 
-func testExternalSecretsForCertificate() *v1alpha1.ExternalSecrets {
-	externalSecrets := commontest.TestExternalSecrets()
-
-	externalSecrets.Spec = v1alpha1.ExternalSecretsSpec{
-		ControllerConfig: &v1alpha1.ControllerConfig{
-			Namespace: commontest.TestExternalSecretsNamespace,
-		},
-		ExternalSecretsConfig: &v1alpha1.ExternalSecretsConfig{
-			CertManagerConfig: &v1alpha1.CertManagerConfig{
-				Enabled: "true",
+func testExternalSecretsConfigForCertificate() *v1alpha1.ExternalSecretsConfig {
+	esc := commontest.TestExternalSecretsConfig()
+	esc.Spec = v1alpha1.ExternalSecretsConfigSpec{
+		ControllerConfig: v1alpha1.ControllerConfig{
+			CertProvider: &v1alpha1.CertProvidersConfig{
+				CertManager: &v1alpha1.CertManagerConfig{},
 			},
+		},
+		ApplicationConfig: v1alpha1.ApplicationConfig{
+			OperatingNamespace: "test-ns",
+		},
+		Plugins: v1alpha1.PluginsConfig{
 			BitwardenSecretManagerProvider: &v1alpha1.BitwardenSecretManagerProvider{},
 		},
 	}
-	return externalSecrets
+	return esc
 }
 
 // testIssuer creates a dummy cert-manager Issuer for testing
diff --git a/pkg/controller/external_secrets/constants.go b/pkg/controller/external_secrets/constants.go
index 4906b951..389199fa 100644
--- a/pkg/controller/external_secrets/constants.go
+++ b/pkg/controller/external_secrets/constants.go
@@ -16,8 +16,8 @@ const (
 	// ControllerName is the name of the controller used in logs and events.
 	ControllerName = externalsecretsCommonName + "-controller"
 
-	// finalizer name for external-secrets.openshift.operator.io resource.
-	finalizer = "externalsecrets.openshift.operator.io/" + ControllerName
+	// finalizer name for externalsecretsconfigs.operator.openshift.io resource.
+	finalizer = "externalsecretsconfigs.operator.openshift.io/" + ControllerName
 
 	// controllerProcessedAnnotation is the annotation added to external-secrets resource once after
 	// successful reconciliation by the controller.
@@ -46,7 +46,7 @@ const (
 	bitwardenImageVersionEnvVarName = "BITWARDEN_SDK_SERVER_IMAGE_VERSION"
 
 	// externalsecretsDefaultNamespace is the namespace where the `external-secrets` operand required resources
-	// will be created, when ExternalSecrets.Spec.ControllerConfig.Namespace is not set.
+	// will be created, when ExternalSecretsConfig.Spec.Namespace is not set.
 	externalsecretsDefaultNamespace = "external-secrets"
 )
diff --git a/pkg/controller/external_secrets/controller.go b/pkg/controller/external_secrets/controller.go
index dd0a88fe..68bca661 100644
--- a/pkg/controller/external_secrets/controller.go
+++ b/pkg/controller/external_secrets/controller.go
@@ -78,7 +78,7 @@ var (
 	}
 )
 
-// Reconciler reconciles a ExternalSecrets object
+// Reconciler reconciles a ExternalSecretsConfig object
 type Reconciler struct {
 	operatorclient.CtrlClient
 	UncachedClient operatorclient.CtrlClient
@@ -90,10 +90,10 @@ type Reconciler struct {
 	optionalResourcesList map[string]struct{}
 }
 
-// +kubebuilder:rbac:groups=operator.openshift.io,resources=externalsecrets,verbs=get;list;watch;create;update
+// +kubebuilder:rbac:groups=operator.openshift.io,resources=externalsecretsconfigs,verbs=get;list;watch;create;update
+// +kubebuilder:rbac:groups=operator.openshift.io,resources=externalsecretsconfigs/status,verbs=get;update
+// +kubebuilder:rbac:groups=operator.openshift.io,resources=externalsecretsconfigs/finalizers,verbs=update
 // +kubebuilder:rbac:groups=operator.openshift.io,resources=externalsecretsmanagers,verbs=get;list;watch;create;update
-// +kubebuilder:rbac:groups=operator.openshift.io,resources=externalsecrets/status,verbs=get;update
-// +kubebuilder:rbac:groups=operator.openshift.io,resources=externalsecrets/finalizers,verbs=update
 // +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;watch;create;update;patch
 // +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles;rolebindings;clusterroles;clusterrolebindings,verbs=get;list;watch;create;update;patch;delete
 
@@ -178,7 +178,7 @@ func BuildCustomClient(mgr ctrl.Manager, r *Reconciler) (client.Client, error) {
 			Label: managedResourceLabelReqSelector,
 		}
 	}
-	ownObject := &operatorv1alpha1.ExternalSecrets{}
+	ownObject := &operatorv1alpha1.ExternalSecretsConfig{}
 	objectList[ownObject] = cache.ByObject{}
 	esmObject := &operatorv1alpha1.ExternalSecretsManager{}
 	objectList[esmObject] = cache.ByObject{}
@@ -257,12 +257,11 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
 				return []reconcile.Request{
 					{
 						NamespacedName: types.NamespacedName{
-							Name: common.ExternalSecretsObjectName,
+							Name: common.ExternalSecretsConfigObjectName,
 						},
 					},
 				}
 			}
-
 		}
 		r.log.V(4).Info("object not of interest, ignoring reconcile event", "object", fmt.Sprintf("%T", obj), "name", obj.GetName(), "namespace", obj.GetNamespace())
 		return []reconcile.Request{}
@@ -276,7 +275,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error {
 	managedResourcePredicate := builder.WithPredicates(managedResources)
 
 	mgrBuilder := ctrl.NewControllerManagedBy(mgr).
-		For(&operatorv1alpha1.ExternalSecrets{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
+		For(&operatorv1alpha1.ExternalSecretsConfig{}, builder.WithPredicates(predicate.GenerationChangedPredicate{})).
 		Named(ControllerName)
 
 	for _, res := range controllerManagedResources {
@@ -325,33 +324,33 @@ func isCRDInstalled(config *rest.Config, name, groupVersion string) (bool, error
 }
 
 // Reconcile is the reconciliation loop to manage the current state external-secrets
-// deployment to reflect desired state configured in `externalsecrets.openshift.operator.io`.
+// deployment to reflect desired state configured in `externalsecretsconfigs.operator.openshift.io`.
 func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
 	r.log.V(1).Info("reconciling", "request", req)
 
-	// Fetch the externalsecrets.openshift.operator.io CR
-	externalsecrets := &operatorv1alpha1.ExternalSecrets{}
-	if err := r.Get(ctx, req.NamespacedName, externalsecrets); err != nil {
+	// Fetch the externalsecretsconfigs.operator.openshift.io CR
+	esc := &operatorv1alpha1.ExternalSecretsConfig{}
+	if err := r.Get(ctx, req.NamespacedName, esc); err != nil {
 		if errors.IsNotFound(err) {
 			// NotFound errors, since they can't be fixed by an immediate
 			// requeue (have to wait for a new notification), and can be processed
 			// on deleted requests.
-			r.log.V(1).Info("externalsecrets.openshift.operator.io object not found, skipping reconciliation", "request", req)
+			r.log.V(1).Info("externalsecretsconfigs.operator.openshift.io object not found, skipping reconciliation", "request", req)
 			return ctrl.Result{}, nil
 		}
-		return ctrl.Result{}, fmt.Errorf("failed to fetch externalsecrets.openshift.operator.io %q during reconciliation: %w", req.NamespacedName, err)
+		return ctrl.Result{}, fmt.Errorf("failed to fetch externalsecretsconfigs.operator.openshift.io %q during reconciliation: %w", req.NamespacedName, err)
 	}
 
-	if !externalsecrets.DeletionTimestamp.IsZero() {
-		r.log.V(1).Info("externalsecrets.openshift.operator.io is marked for deletion", "namespace", req.NamespacedName)
+	if !esc.DeletionTimestamp.IsZero() {
+		r.log.V(1).Info("externalsecretsconfigs.operator.openshift.io is marked for deletion", "namespace", req.NamespacedName)
 
-		if requeue, err := r.cleanUp(externalsecrets); err != nil {
-			return ctrl.Result{}, fmt.Errorf("clean up failed for %q externalsecrets.openshift.operator.io instance deletion: %w", req.NamespacedName, err)
+		if requeue, err := r.cleanUp(esc); err != nil {
+			return ctrl.Result{}, fmt.Errorf("clean up failed for %q externalsecretsconfigs.operator.openshift.io instance deletion: %w", req.NamespacedName, err)
 		} else if requeue {
 			return ctrl.Result{RequeueAfter: common.DefaultRequeueTime}, nil
 		}
 
-		if err := common.RemoveFinalizer(ctx, externalsecrets, r.CtrlClient, finalizer); err != nil {
+		if err := common.RemoveFinalizer(ctx, esc, r.CtrlClient, finalizer); err != nil {
 			return ctrl.Result{}, err
 		}
 
@@ -359,12 +358,12 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 		return ctrl.Result{}, nil
 	}
 
-	// Set finalizers on the externalsecrets.openshift.operator.io resource
-	if err := common.AddFinalizer(ctx, externalsecrets, r.CtrlClient, finalizer); err != nil {
-		return ctrl.Result{}, fmt.Errorf("failed to update %q externalsecrets.openshift.operator.io with finalizers: %w", req.NamespacedName, err)
+	// Set finalizers on the externalsecretsconfigs.operator.openshift.io resource
+	if err := common.AddFinalizer(ctx, esc, r.CtrlClient, finalizer); err != nil {
+		return ctrl.Result{}, fmt.Errorf("failed to update %q externalsecretsconfigs.operator.openshift.io with finalizers: %w", req.NamespacedName, err)
 	}
 
-	// Fetch the externalsecretsmanager.openshift.operator.io CR
+	// Fetch the externalsecretsmanagers.operator.openshift.io CR
 	esmNamespacedName := types.NamespacedName{
 		Name: common.ExternalSecretsManagerObjectName,
 	}
@@ -372,25 +371,25 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu
 		if errors.IsNotFound(err) {
 			// NotFound errors, since they can't be fixed by an immediate
 			// requeue (have to wait for a new notification).
-			r.log.V(1).Info("externalsecretsmanager.openshift.operator.io object not found, continuing without it")
+			r.log.V(1).Info("externalsecretsmanagers.operator.openshift.io object not found, continuing without it")
 		} else {
-			return ctrl.Result{}, fmt.Errorf("failed to fetch externalsecretsmanager.openshift.operator.io %q during reconciliation: %w", esmNamespacedName, err)
+			return ctrl.Result{}, fmt.Errorf("failed to fetch externalsecretsmanagers.operator.openshift.io %q during reconciliation: %w", esmNamespacedName, err)
 		}
 	}
 
-	return r.processReconcileRequest(externalsecrets, req.NamespacedName)
+	return r.processReconcileRequest(esc, req.NamespacedName)
 }
 
-func (r *Reconciler) processReconcileRequest(externalsecrets *operatorv1alpha1.ExternalSecrets, req types.NamespacedName) (ctrl.Result, error) {
+func (r *Reconciler) processReconcileRequest(esc *operatorv1alpha1.ExternalSecretsConfig, req types.NamespacedName) (ctrl.Result, error) {
 	createRecon := false
-	if !containsProcessedAnnotation(externalsecrets) && reflect.DeepEqual(externalsecrets.Status, operatorv1alpha1.ExternalSecretsStatus{}) {
-		r.log.V(1).Info("starting reconciliation of newly created externalsecrets.openshift.operator.io", "namespace", externalsecrets.GetNamespace(), "name", externalsecrets.GetName())
+	if !containsProcessedAnnotation(esc) && reflect.DeepEqual(esc.Status, operatorv1alpha1.ExternalSecretsConfigStatus{}) {
+		r.log.V(1).Info("starting reconciliation of newly created externalsecretsconfigs.operator.openshift.io", "namespace", esc.GetNamespace(), "name", esc.GetName())
 		createRecon = true
 	}
 
 	var errUpdate error = nil
-	observedGeneration := externalsecrets.GetGeneration()
-	err := r.reconcileExternalSecretsDeployment(externalsecrets, createRecon)
+	observedGeneration := esc.GetGeneration()
+	err := r.reconcileExternalSecretsDeployment(esc, createRecon)
 	if err != nil {
 		r.log.Error(err, "failed to reconcile external-secrets deployment", "request", req)
 		isFatal := common.IsIrrecoverableError(err)
@@ -420,9 +419,9 @@ func (r *Reconciler) processReconcileRequest(externalsecrets *operatorv1alpha1.E
 			readyCond.Message = fmt.Sprintf("reconciliation failed, retrying: %v", err)
 		}
 
-		if apimeta.SetStatusCondition(&externalsecrets.Status.Conditions, degradedCond) ||
-			apimeta.SetStatusCondition(&externalsecrets.Status.Conditions, readyCond) {
-			errUpdate = r.updateCondition(externalsecrets, err)
+		if apimeta.SetStatusCondition(&esc.Status.Conditions, degradedCond) ||
+			apimeta.SetStatusCondition(&esc.Status.Conditions, readyCond) {
+			errUpdate = r.updateCondition(esc, err)
 			err = utilerrors.NewAggregate([]error{err, errUpdate})
 		}
 
@@ -447,17 +446,17 @@ func (r *Reconciler) processReconcileRequest(externalsecrets *operatorv1alpha1.E
 		ObservedGeneration: observedGeneration,
 	}
 
-	if apimeta.SetStatusCondition(&externalsecrets.Status.Conditions, degradedCond) ||
-		apimeta.SetStatusCondition(&externalsecrets.Status.Conditions, readyCond) {
-		errUpdate = r.updateCondition(externalsecrets, nil)
+	if apimeta.SetStatusCondition(&esc.Status.Conditions, degradedCond) ||
+		apimeta.SetStatusCondition(&esc.Status.Conditions, readyCond) {
+		errUpdate = r.updateCondition(esc, nil)
 	}
 
 	return ctrl.Result{}, errUpdate
 }
 
-// cleanUp handles deletion of externalsecrets.openshift.operator.io gracefully.
-func (r *Reconciler) cleanUp(externalsecrets *operatorv1alpha1.ExternalSecrets) (bool, error) {
+// cleanUp handles deletion of externalsecretsconfigs.operator.openshift.io gracefully.
+func (r *Reconciler) cleanUp(esc *operatorv1alpha1.ExternalSecretsConfig) (bool, error) {
 	// TODO: For GA, handle cleaning up of resources created for installing external-secrets operand.
-	r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeWarning, "RemoveDeployment", "%s/%s externalsecrets.openshift.operator.io marked for deletion, remove reference in deployment and remove all resources created for deployment", externalsecrets.GetNamespace(), externalsecrets.GetName())
+	r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "RemoveDeployment", "%s/%s externalsecretsconfigs.operator.openshift.io marked for deletion, remove reference in deployment and remove all resources created for deployment", esc.GetNamespace(), esc.GetName())
 
 	return false, nil
 }
diff --git a/pkg/controller/external_secrets/deployments.go b/pkg/controller/external_secrets/deployments.go
index c659f51e..80a1a682 100644
--- a/pkg/controller/external_secrets/deployments.go
+++ b/pkg/controller/external_secrets/deployments.go
@@ -3,7 +3,6 @@ package external_secrets
 import (
 	"fmt"
 	"os"
-	"reflect"
 	"unsafe"
 
 	appsv1 "k8s.io/api/apps/v1"
@@ -21,7 +20,7 @@ import (
 )
 
 // createOrApplyDeployments ensures required Deployment resources exist and are correctly configured.
-func (r *Reconciler) createOrApplyDeployments(externalsecrets *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string, externalsecretsCreateRecon bool) error {
+func (r *Reconciler) createOrApplyDeployments(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string, externalSecretsConfigCreateRecon bool) error {
 	// Define all Deployment assets to apply based on conditions.
 	deployments := []struct {
 		assetName string
@@ -37,11 +36,11 @@ func (r *Reconciler) createOrApplyDeployments(externalsecrets *operatorv1alpha1.
 		},
 		{
 			assetName: certControllerDeploymentAssetName,
-			condition: !isCertManagerConfigEnabled(externalsecrets),
+			condition: !isCertManagerConfigEnabled(esc),
 		},
 		{
 			assetName: bitwardenDeploymentAssetName,
-			condition: isBitwardenConfigEnabled(externalsecrets),
+			condition: isBitwardenConfigEnabled(esc),
 		},
 	}
 
@@ -53,23 +52,23 @@ func (r *Reconciler) createOrApplyDeployments(externalsecrets *operatorv1alpha1.
 			}
 			continue
 		}
-		if err := r.createOrApplyDeploymentFromAsset(externalsecrets, d.assetName, resourceLabels, externalsecretsCreateRecon); err != nil {
+		if err := r.createOrApplyDeploymentFromAsset(esc, d.assetName, resourceLabels, externalSecretsConfigCreateRecon); err != nil {
 			return err
 		}
 	}
 
-	if err := r.updateImageInStatus(externalsecrets); err != nil {
-		return common.FromClientError(err, "failed to update %s/%s status with image info", externalsecrets.GetNamespace(), externalsecrets.GetName())
+	if err := r.updateImageInStatus(esc); err != nil {
+		return common.FromClientError(err, "failed to update %s/%s status with image info", esc.GetNamespace(), esc.GetName())
 	}
 
 	return nil
 }
 
-func (r *Reconciler) createOrApplyDeploymentFromAsset(externalsecrets *operatorv1alpha1.ExternalSecrets, assetName string, resourceLabels map[string]string,
-	externalsecretsCreateRecon bool,
+func (r *Reconciler) createOrApplyDeploymentFromAsset(esc *operatorv1alpha1.ExternalSecretsConfig, assetName string, resourceLabels map[string]string,
+	externalSecretsConfigCreateRecon bool,
 ) error {
-	deployment, err := r.getDeploymentObject(assetName, externalsecrets, resourceLabels)
+	deployment, err := r.getDeploymentObject(assetName, esc, resourceLabels)
 	if err != nil {
 		return err
 	}
@@ -84,20 +83,20 @@ func (r *Reconciler) createOrApplyDeploymentFromAsset(externalsecrets *operatorv
 	if err != nil {
 		return common.FromClientError(err, "failed to check %s deployment resource already exists", deploymentName)
 	}
-	if exist && externalsecretsCreateRecon {
-		r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s deployment resource already exists", deploymentName)
+	if exist && externalSecretsConfigCreateRecon {
+		r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s deployment resource already exists", deploymentName)
 	}
 
 	if exist && common.HasObjectChanged(deployment, fetched) {
 		r.log.V(1).Info("deployment has been modified, updating to desired state", "name", deploymentName)
 		if err := r.UpdateWithRetry(r.ctx, deployment); err != nil {
 			return common.FromClientError(err, "failed to update %s deployment resource", deploymentName)
 		}
-		r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeNormal, "Reconciled", "deployment resource %s updated", deploymentName)
+		r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "deployment resource %s updated", deploymentName)
 	} else if !exist {
 		if err := r.Create(r.ctx, deployment); err != nil {
 			return common.FromClientError(err, "failed to create %s deployment resource", deploymentName)
 		}
-		r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeNormal, "Reconciled", "deployment resource %s created", deploymentName)
+		r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "deployment resource %s created", deploymentName)
 	} else {
 		r.log.V(4).Info("deployment resource already exists and is in expected state", "name", deploymentName)
 	}
@@ -105,9 +104,9 @@ func (r *Reconciler) createOrApplyDeploymentFromAsset(externalsecrets *operatorv
 	return nil
 }
 
-func (r *Reconciler) getDeploymentObject(assetName string, externalsecrets *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string) (*appsv1.Deployment, error) {
+func (r *Reconciler) getDeploymentObject(assetName string, esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string) (*appsv1.Deployment, error) {
 	deployment := common.DecodeDeploymentObjBytes(assets.MustAsset(assetName))
 
-	updateNamespace(deployment, externalsecrets)
+	updateNamespace(deployment, esc)
 	common.UpdateResourceLabels(deployment, resourceLabels)
 	updatePodTemplateLabels(deployment, resourceLabels)
 
@@ -119,13 +118,18 @@ func (r *Reconciler) getDeploymentObject(assetName string, externalsecrets *oper
 	if bitwardenImage == "" {
 		return nil, common.NewIrrecoverableError(fmt.Errorf("%s environment variable with bitwarden-sdk-server image not set", bitwardenImageEnvVarName), "failed to update image in %s deployment object", deployment.GetName())
 	}
-	logLevel := getLogLevel(externalsecrets.Spec.ExternalSecretsConfig)
+	logLevel := getLogLevel(esc.Spec)
 	switch assetName {
 	case controllerDeploymentAssetName:
-		updateContainerSpec(deployment, externalsecrets, image, logLevel)
+		updateContainerSpec(deployment, esc, image, logLevel)
 	case webhookDeploymentAssetName:
-		updateWebhookContainerSpec(deployment, image, logLevel)
+		checkInterval := "5m"
+		if esc.Spec.ApplicationConfig.WebhookConfig != nil &&
+			esc.Spec.ApplicationConfig.WebhookConfig.CertificateCheckInterval != nil {
+			checkInterval = esc.Spec.ApplicationConfig.WebhookConfig.CertificateCheckInterval.Duration.String()
+		}
+		updateWebhookContainerSpec(deployment, image, logLevel, checkInterval)
 	case certControllerDeploymentAssetName:
 		updateCertControllerContainerSpec(deployment, image, logLevel)
 	case bitwardenDeploymentAssetName:
@@ -133,16 +137,16 @@ func (r *Reconciler) getDeploymentObject(assetName string, externalsecrets *oper
 		updateBitwardenServerContainerSpec(deployment, bitwardenImage)
 	}
 
-	if err := r.updateResourceRequirement(deployment, externalsecrets); err != nil {
+	if err := r.updateResourceRequirement(deployment, esc); err != nil {
 		return nil, fmt.Errorf("failed to update resource requirements: %w", err)
 	}
-	if err := r.updateAffinityRules(deployment, externalsecrets); err != nil {
+	if err := r.updateAffinityRules(deployment, esc); err != nil {
 		return nil, fmt.Errorf("failed to update affinity rules: %w", err)
 	}
-	if err := r.updatePodTolerations(deployment, externalsecrets); err != nil {
+	if err := r.updatePodTolerations(deployment, esc); err != nil {
 		return nil, fmt.Errorf("failed to update pod tolerations: %w", err)
 	}
-	if err := r.updateNodeSelector(deployment, externalsecrets); err != nil {
+	if err := r.updateNodeSelector(deployment, esc); err != nil {
 		return nil, fmt.Errorf("failed to update node selector: %w", err)
 	}
 
@@ -176,11 +180,11 @@ func updateContainerSecurityContext(container *corev1.Container) {
 }
 
 // updateResourceRequirement sets validated resource requirements to all containers.
-func (r *Reconciler) updateResourceRequirement(deployment *appsv1.Deployment, externalsecrets *operatorv1alpha1.ExternalSecrets) error {
+func (r *Reconciler) updateResourceRequirement(deployment *appsv1.Deployment, esc *operatorv1alpha1.ExternalSecretsConfig) error {
 	rscReqs := corev1.ResourceRequirements{}
 
-	if externalsecrets.Spec.ExternalSecretsConfig != nil && !reflect.ValueOf(externalsecrets.Spec.ExternalSecretsConfig.Resources).IsZero() {
-		externalsecrets.Spec.ExternalSecretsConfig.Resources.DeepCopyInto(&rscReqs)
-	} else if r.esm.Spec.GlobalConfig != nil && !reflect.ValueOf(r.esm.Spec.GlobalConfig.Resources).IsZero() {
+	if esc.Spec.ApplicationConfig.Resources != nil {
+		esc.Spec.ApplicationConfig.Resources.DeepCopyInto(&rscReqs)
+	} else if r.esm.Spec.GlobalConfig != nil && r.esm.Spec.GlobalConfig.Resources != nil {
 		r.esm.Spec.GlobalConfig.Resources.DeepCopyInto(&rscReqs)
 	} else {
 		return nil
@@ -207,11 +211,11 @@ func validateResourceRequirements(requirements corev1.ResourceRequirements, fldP
 }
 
 // updateNodeSelector sets and validates node selector constraints.
-func (r *Reconciler) updateNodeSelector(deployment *appsv1.Deployment, externalsecrets *operatorv1alpha1.ExternalSecrets) error {
+func (r *Reconciler) updateNodeSelector(deployment *appsv1.Deployment, esc *operatorv1alpha1.ExternalSecretsConfig) error {
 	var nodeSelector map[string]string
 
-	if externalsecrets.Spec.ExternalSecretsConfig != nil && externalsecrets.Spec.ExternalSecretsConfig.NodeSelector != nil {
-		nodeSelector = externalsecrets.Spec.ExternalSecretsConfig.NodeSelector
+	if esc.Spec.ApplicationConfig.NodeSelector != nil {
+		nodeSelector = esc.Spec.ApplicationConfig.NodeSelector
 	} else if r.esm.Spec.GlobalConfig != nil && r.esm.Spec.GlobalConfig.NodeSelector != nil {
 		nodeSelector = r.esm.Spec.GlobalConfig.NodeSelector
 	}
@@ -220,7 +224,7 @@ func (r *Reconciler) updateNodeSelector(deployment *appsv1.Deployment, externals
 		return nil
 	}
 
-	if err := validateNodeSelectorConfig(nodeSelector, field.NewPath("spec", "externalSecretsConfig")); err != nil {
+	if err := validateNodeSelectorConfig(nodeSelector, field.NewPath("spec")); err != nil {
 		return err
 	}
 
@@ -229,11 +233,11 @@ func (r *Reconciler) updateNodeSelector(deployment *appsv1.Deployment, externals
 }
 
 // updateAffinityRules sets and validates pod affinity/anti-affinity rules.
-func (r *Reconciler) updateAffinityRules(deployment *appsv1.Deployment, externalsecrets *operatorv1alpha1.ExternalSecrets) error {
+func (r *Reconciler) updateAffinityRules(deployment *appsv1.Deployment, esc *operatorv1alpha1.ExternalSecretsConfig) error {
 	var affinity *corev1.Affinity
 
-	if externalsecrets.Spec.ExternalSecretsConfig != nil && externalsecrets.Spec.ExternalSecretsConfig.Affinity != nil {
-		affinity = externalsecrets.Spec.ExternalSecretsConfig.Affinity
+	if esc.Spec.ApplicationConfig.Affinity != nil {
+		affinity = esc.Spec.ApplicationConfig.Affinity
 	} else if r.esm.Spec.GlobalConfig != nil && r.esm.Spec.GlobalConfig.Affinity != nil {
 		affinity = r.esm.Spec.GlobalConfig.Affinity
 	}
@@ -242,7 +246,7 @@ func (r *Reconciler) updateAffinityRules(deployment *appsv1.Deployment, external
 		return nil
 	}
 
-	if err := validateAffinityRules(affinity, field.NewPath("spec", "externalSecretsConfig", "affinity")); err != nil {
+	if err := validateAffinityRules(affinity, field.NewPath("spec", "affinity")); err != nil {
 		return err
 	}
 
@@ -251,11 +255,11 @@ func (r *Reconciler) updateAffinityRules(deployment *appsv1.Deployment, external
 }
 
 // updatePodTolerations sets and validates pod tolerations.
-func (r *Reconciler) updatePodTolerations(deployment *appsv1.Deployment, externalsecrets *operatorv1alpha1.ExternalSecrets) error {
+func (r *Reconciler) updatePodTolerations(deployment *appsv1.Deployment, esc *operatorv1alpha1.ExternalSecretsConfig) error {
 	var tolerations []corev1.Toleration
 
-	if externalsecrets.Spec.ExternalSecretsConfig != nil && externalsecrets.Spec.ExternalSecretsConfig.Tolerations != nil {
-		tolerations = externalsecrets.Spec.ExternalSecretsConfig.Tolerations
+	if esc.Spec.ApplicationConfig.Tolerations != nil {
+		tolerations = esc.Spec.ApplicationConfig.Tolerations
 	} else if r.esm.Spec.GlobalConfig != nil && r.esm.Spec.GlobalConfig.Tolerations != nil {
 		tolerations = r.esm.Spec.GlobalConfig.Tolerations
 	}
@@ -264,7 +268,7 @@ func (r *Reconciler) updatePodTolerations(deployment *appsv1.Deployment, externa
 		return nil
 	}
 
-	if err := validateTolerationsConfig(tolerations, field.NewPath("spec", "externalSecretsConfig", "tolerations")); err != nil {
+	if err := validateTolerationsConfig(tolerations, field.NewPath("spec", "tolerations")); err != nil {
 		return err
 	}
 
@@ -291,18 +295,20 @@ func validateTolerationsConfig(tolerations []corev1.Toleration, fldPath *field.P
 	return corevalidation.ValidateTolerations(convTolerations, fldPath.Child("tolerations")).ToAggregate()
 }
 
-func (r *Reconciler) updateImageInStatus(externalsecrets *operatorv1alpha1.ExternalSecrets) error {
-	image := os.Getenv(externalsecretsImageEnvVarName)
-	if externalsecrets.Status.ExternalSecretsImage != image {
-		externalsecrets.Status.ExternalSecretsImage = image
-		return r.updateStatus(r.ctx, externalsecrets)
+func (r *Reconciler) updateImageInStatus(esc *operatorv1alpha1.ExternalSecretsConfig) error {
+	externalSecretsImage := os.Getenv(externalsecretsImageEnvVarName)
+	bitwardenImage := os.Getenv(bitwardenImageEnvVarName)
+	if esc.Status.ExternalSecretsImage != externalSecretsImage || esc.Status.BitwardenSDKServerImage != bitwardenImage {
+		esc.Status.ExternalSecretsImage = externalSecretsImage
+		esc.Status.BitwardenSDKServerImage = bitwardenImage
+		return r.updateStatus(r.ctx, esc)
 	}
 	return nil
 }
 
 // argument list for external-secrets deployment resource
-func updateContainerSpec(deployment *appsv1.Deployment, externalsecrets *operatorv1alpha1.ExternalSecrets, image, logLevel string) {
-	namespace := getOperatingNamespace(externalsecrets)
+func updateContainerSpec(deployment *appsv1.Deployment, esc *operatorv1alpha1.ExternalSecretsConfig, image, logLevel string) {
+	namespace := getOperatingNamespace(esc)
 	args := []string{
 		"--concurrent=1",
 		"--metrics-addr=:8080",
@@ -329,13 +335,13 @@ func updateContainerSpec(deployment *appsv1.Deployment, externalsecrets *operato
 }
 
 // argument list for webhook deployment resource
-func updateWebhookContainerSpec(deployment *appsv1.Deployment, image, logLevel string) {
+func updateWebhookContainerSpec(deployment *appsv1.Deployment, image, logLevel, checkInterval string) {
 	args := []string{
 		"webhook",
 		fmt.Sprintf("--dns-name=external-secrets-webhook.%s.svc", deployment.GetNamespace()),
 		"--port=10250",
 		"--cert-dir=/tmp/certs",
-		"--check-interval=5m",
+		fmt.Sprintf("--check-interval=%s", checkInterval),
 		"--metrics-addr=:8080",
 		"--healthz-addr=:8081",
 		fmt.Sprintf("--loglevel=%s", logLevel),
diff --git a/pkg/controller/external_secrets/deployments_test.go b/pkg/controller/external_secrets/deployments_test.go
index e8cbe664..7b48f6ba 100644
--- a/pkg/controller/external_secrets/deployments_test.go
+++ b/pkg/controller/external_secrets/deployments_test.go
@@ -20,11 +20,11 @@ import (
import ( func TestCreateOrApplyDeployments(t *testing.T) { tests := []struct { - name string - preReq func(*Reconciler, *fakes.FakeCtrlClient) - updateExternalSecrets func(*v1alpha1.ExternalSecrets) - skipEnvVar bool - wantErr string + name string + preReq func(*Reconciler, *fakes.FakeCtrlClient) + updateExternalSecretsConfig func(*v1alpha1.ExternalSecretsConfig) + skipEnvVar bool + wantErr string }{ { name: "deployment reconciliation successful", @@ -38,7 +38,7 @@ func TestCreateOrApplyDeployments(t *testing.T) { return true, nil }) }, - updateExternalSecrets: func(i *v1alpha1.ExternalSecrets) { + updateExternalSecretsConfig: func(i *v1alpha1.ExternalSecretsConfig) { i.Status.ExternalSecretsImage = commontest.TestExternalSecretsImageName }, }, @@ -104,11 +104,8 @@ func TestCreateOrApplyDeployments(t *testing.T) { return true, nil }) }, - updateExternalSecrets: func(i *v1alpha1.ExternalSecrets) { - if i.Spec.ExternalSecretsConfig == nil { - i.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{} - } - i.Spec.ExternalSecretsConfig.Affinity = &corev1.Affinity{ + updateExternalSecretsConfig: func(i *v1alpha1.ExternalSecretsConfig) { + i.Spec.ApplicationConfig.Affinity = &corev1.Affinity{ NodeAffinity: &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ NodeSelectorTerms: []corev1.NodeSelectorTerm{ @@ -160,7 +157,7 @@ func TestCreateOrApplyDeployments(t *testing.T) { }, }, } - i.Spec.ExternalSecretsConfig.Tolerations = []corev1.Toleration{ + i.Spec.ApplicationConfig.Tolerations = []corev1.Toleration{ { Key: "type", Operator: corev1.TolerationOpEqual, @@ -168,8 +165,8 @@ func TestCreateOrApplyDeployments(t *testing.T) { Effect: corev1.TaintEffectNoSchedule, }, } - i.Spec.ExternalSecretsConfig.NodeSelector = map[string]string{"type": "test"} - i.Spec.ExternalSecretsConfig.Resources = corev1.ResourceRequirements{ + i.Spec.ApplicationConfig.NodeSelector = map[string]string{"type": "test"} + i.Spec.ApplicationConfig.Resources = &corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("100m"), corev1.ResourceMemory: resource.MustParse("100Mi"), @@ -201,13 +198,13 @@ func TestCreateOrApplyDeployments(t *testing.T) { }) m.StatusUpdateCalls(func(ctx context.Context, obj client.Object, _ ...client.SubResourceUpdateOption) error { switch obj.(type) { - case *v1alpha1.ExternalSecrets: + case *v1alpha1.ExternalSecretsConfig: return commontest.TestClientError } return nil }) }, - wantErr: `failed to update /cluster status with image info: failed to update externalsecrets.openshift.operator.io "/cluster" status: test client error`, + wantErr: `failed to update /cluster status with image info: failed to update externalsecretsconfigs.operator.openshift.io "/cluster" status: test client error`, }, { name: "deployment reconciliation with invalid toleration configuration", @@ -221,11 +218,8 @@ func TestCreateOrApplyDeployments(t *testing.T) { return true, nil }) }, - updateExternalSecrets: func(i *v1alpha1.ExternalSecrets) { - if i.Spec.ExternalSecretsConfig == nil { - i.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{} - } - i.Spec.ExternalSecretsConfig.Tolerations = []corev1.Toleration{ + updateExternalSecretsConfig: func(i *v1alpha1.ExternalSecretsConfig) { + i.Spec.ApplicationConfig.Tolerations = []corev1.Toleration{ { Operator: corev1.TolerationOpExists, Value: "test", @@ -233,7 +227,7 @@ func TestCreateOrApplyDeployments(t *testing.T) { }, } }, - wantErr: "failed to update pod tolerations: 
spec.externalSecretsConfig.tolerations.tolerations[0].operator: Invalid value: \"test\": value must be empty when `operator` is 'Exists'", + wantErr: "failed to update pod tolerations: spec.tolerations.tolerations[0].operator: Invalid value: \"test\": value must be empty when `operator` is 'Exists'", }, { name: "deployment reconciliation with invalid nodeSelector configuration", @@ -247,13 +241,10 @@ func TestCreateOrApplyDeployments(t *testing.T) { return true, nil }) }, - updateExternalSecrets: func(i *v1alpha1.ExternalSecrets) { - if i.Spec.ExternalSecretsConfig == nil { - i.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{} - } - i.Spec.ExternalSecretsConfig.NodeSelector = map[string]string{"node/Label/2": "value2"} + updateExternalSecretsConfig: func(i *v1alpha1.ExternalSecretsConfig) { + i.Spec.ApplicationConfig.NodeSelector = map[string]string{"node/Label/2": "value2"} }, - wantErr: `failed to update node selector: spec.externalSecretsConfig.nodeSelector: Invalid value: "node/Label/2": a qualified name must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')`, + wantErr: `failed to update node selector: spec.nodeSelector: Invalid value: "node/Label/2": a qualified name must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')`, }, { name: "deployment reconciliation with invalid affinity configuration", @@ -267,12 +258,8 @@ func TestCreateOrApplyDeployments(t *testing.T) { return true, nil }) }, - updateExternalSecrets: func(i *v1alpha1.ExternalSecrets) { - if i.Spec.ExternalSecretsConfig == nil { - i.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{} - } - - i.Spec.ExternalSecretsConfig.Affinity = &corev1.Affinity{ + updateExternalSecretsConfig: func(i *v1alpha1.ExternalSecretsConfig) { + i.Spec.ApplicationConfig.Affinity = &corev1.Affinity{ NodeAffinity: &corev1.NodeAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ NodeSelectorTerms: []corev1.NodeSelectorTerm{ @@ -322,7 +309,7 @@ func TestCreateOrApplyDeployments(t *testing.T) { }, } }, - wantErr: "failed to update affinity rules: [spec.externalSecretsConfig.affinity.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values: Required value: must be specified when `operator` is 'In' or 'NotIn', spec.externalSecretsConfig.affinity.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey: Required value: can not be empty, spec.externalSecretsConfig.affinity.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey: Invalid value: \"\": name part must be non-empty, spec.externalSecretsConfig.affinity.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey: Invalid value: \"\": name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'), spec.externalSecretsConfig.affinity.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey: Required value: can not be empty, spec.externalSecretsConfig.affinity.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey: Invalid value: \"\": name part must be non-empty, spec.externalSecretsConfig.affinity.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey: Invalid value: \"\": name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')]", + wantErr: "failed to update affinity rules: [spec.affinity.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms[0].matchExpressions[0].values: Required value: must be specified when `operator` is 'In' or 'NotIn', spec.affinity.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey: Required value: can not be empty, spec.affinity.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey: Invalid value: \"\": name part must be non-empty, spec.affinity.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution[0].topologyKey: Invalid value: \"\": name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]'), spec.affinity.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey: Required value: can not be empty, spec.affinity.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey: Invalid value: \"\": name part must be non-empty, spec.affinity.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution[0].podAffinityTerm.topologyKey: Invalid value: \"\": name part must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 
'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]')]", }, { name: "deployment reconciliation with invalid resource requirement configuration", @@ -336,11 +323,8 @@ func TestCreateOrApplyDeployments(t *testing.T) { return true, nil }) }, - updateExternalSecrets: func(i *v1alpha1.ExternalSecrets) { - if i.Spec.ExternalSecretsConfig == nil { - i.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{} - } - i.Spec.ExternalSecretsConfig.Resources = corev1.ResourceRequirements{ + updateExternalSecretsConfig: func(i *v1alpha1.ExternalSecretsConfig) { + i.Spec.ApplicationConfig.Resources = &corev1.ResourceRequirements{ Requests: corev1.ResourceList{ corev1.ResourceCPU: resource.MustParse("100m"), corev1.ResourceMemory: resource.MustParse("100Mi"), @@ -369,13 +353,13 @@ func TestCreateOrApplyDeployments(t *testing.T) { return commontest.TestClientError }) }, - updateExternalSecrets: func(i *v1alpha1.ExternalSecrets) { - if i.Spec.ExternalSecretsConfig == nil { - i.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &v1alpha1.CertManagerConfig{ - Enabled: "true", + updateExternalSecretsConfig: func(i *v1alpha1.ExternalSecretsConfig) { + i.Spec.ControllerConfig = v1alpha1.ControllerConfig{ + CertProvider: &v1alpha1.CertProvidersConfig{ + CertManager: &v1alpha1.CertManagerConfig{ + Mode: v1alpha1.Enabled, }, - } + }, } }, wantErr: `failed to delete deployment resource: test client error`, @@ -395,13 +379,13 @@ func TestCreateOrApplyDeployments(t *testing.T) { return errors.NewNotFound(schema.GroupResource{}, obj.GetName()) }) }, - updateExternalSecrets: func(i *v1alpha1.ExternalSecrets) { - if i.Spec.ExternalSecretsConfig == nil { - i.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &v1alpha1.CertManagerConfig{ - Enabled: "true", + updateExternalSecretsConfig: func(i *v1alpha1.ExternalSecretsConfig) { + i.Spec.ControllerConfig = v1alpha1.ControllerConfig{ + CertProvider: &v1alpha1.CertProvidersConfig{ + CertManager: &v1alpha1.CertManagerConfig{ + Mode: v1alpha1.Enabled, }, - } + }, } }, }, @@ -415,10 +399,10 @@ func TestCreateOrApplyDeployments(t *testing.T) { tt.preReq(r, mock) } r.CtrlClient = mock - externalsecrets := commontest.TestExternalSecrets() + externalsecrets := commontest.TestExternalSecretsConfig() - if tt.updateExternalSecrets != nil { - tt.updateExternalSecrets(externalsecrets) + if tt.updateExternalSecretsConfig != nil { + tt.updateExternalSecretsConfig(externalsecrets) } if !tt.skipEnvVar { t.Setenv("RELATED_IMAGE_EXTERNAL_SECRETS", commontest.TestExternalSecretsImageName) @@ -429,12 +413,8 @@ func TestCreateOrApplyDeployments(t *testing.T) { if (tt.wantErr != "" || err != nil) && (err == nil || err.Error() != tt.wantErr) { t.Errorf("createOrApplyDeployments() err: %v, wantErr: %v", err, tt.wantErr) } - if tt.wantErr == "" { - if tt.wantErr == "" { - if externalsecrets.Status.ExternalSecretsImage != commontest.TestExternalSecretsImageName { - t.Errorf("createOrApplyDeployments() got image in status: %v, want: %v", externalsecrets.Status.ExternalSecretsImage, "test-image") - } - } + if tt.wantErr == "" && externalsecrets.Status.ExternalSecretsImage != commontest.TestExternalSecretsImageName { + t.Errorf("createOrApplyDeployments() got image in status: %v, want: %v", externalsecrets.Status.ExternalSecretsImage, "test-image") } }) } diff --git a/pkg/controller/external_secrets/install_external_secrets.go 
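The hunks above move the deployment tests from an `updateExternalSecrets(*v1alpha1.ExternalSecrets)` hook to `updateExternalSecretsConfig(*v1alpha1.ExternalSecretsConfig)`: every case starts from a shared fixture and mutates only the fields it exercises before calling the reconciler. A self-contained sketch of that table-driven shape follows; the `externalSecretsConfig` type, the `applyNodeSelector` check, and the error strings are illustrative stand-ins, not the operator's real API.

```go
package deployments

import (
	"fmt"
	"strings"
	"testing"
)

// externalSecretsConfig is a stand-in for v1alpha1.ExternalSecretsConfig; only
// the field exercised by this sketch is modeled.
type externalSecretsConfig struct {
	NodeSelector map[string]string
}

// testConfig plays the role of commontest.TestExternalSecretsConfig(): every
// case starts from the same fixture and mutates only what it needs.
func testConfig() *externalSecretsConfig {
	return &externalSecretsConfig{NodeSelector: map[string]string{}}
}

// applyNodeSelector is a stand-in for the node-selector validation exercised
// by createOrApplyDeployments above: keys with more than one '/' are rejected.
func applyNodeSelector(cfg *externalSecretsConfig) error {
	for k := range cfg.NodeSelector {
		if strings.Count(k, "/") > 1 {
			return fmt.Errorf("failed to update node selector: invalid key %q", k)
		}
	}
	return nil
}

func TestApplyNodeSelector(t *testing.T) {
	tests := []struct {
		name                        string
		updateExternalSecretsConfig func(*externalSecretsConfig) // same hook shape as above
		wantErr                     string
	}{
		{
			name: "valid node selector",
			updateExternalSecretsConfig: func(c *externalSecretsConfig) {
				c.NodeSelector["node-role.kubernetes.io/worker"] = ""
			},
		},
		{
			name: "invalid node selector key",
			updateExternalSecretsConfig: func(c *externalSecretsConfig) {
				c.NodeSelector["node/Label/2"] = "value2"
			},
			wantErr: `failed to update node selector: invalid key "node/Label/2"`,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			cfg := testConfig()
			if tt.updateExternalSecretsConfig != nil {
				tt.updateExternalSecretsConfig(cfg)
			}
			err := applyNodeSelector(cfg)
			if (tt.wantErr != "" || err != nil) && (err == nil || err.Error() != tt.wantErr) {
				t.Errorf("applyNodeSelector() err: %v, wantErr: %v", err, tt.wantErr)
			}
		})
	}
}
```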
b/pkg/controller/external_secrets/install_external_secrets.go index 84473670..b7b6ba44 100644 --- a/pkg/controller/external_secrets/install_external_secrets.go +++ b/pkg/controller/external_secrets/install_external_secrets.go @@ -20,14 +20,14 @@ var ( disallowedLabelMatcher = regexp.MustCompile(`^app.kubernetes.io\/|^external-secrets.io\/|^rbac.authorization.k8s.io\/|^servicebinding.io\/controller$|^app$`) ) -func (r *Reconciler) reconcileExternalSecretsDeployment(es *operatorv1alpha1.ExternalSecrets, recon bool) error { - if err := r.validateExternalSecretsConfig(es); err != nil { - return common.NewIrrecoverableError(err, "%s/%s configuration validation failed", es.GetObjectKind().GroupVersionKind().String(), es.GetName()) +func (r *Reconciler) reconcileExternalSecretsDeployment(esc *operatorv1alpha1.ExternalSecretsConfig, recon bool) error { + if err := r.validateExternalSecretsConfig(esc); err != nil { + return common.NewIrrecoverableError(err, "%s/%s configuration validation failed", esc.GetObjectKind().GroupVersionKind().String(), esc.GetName()) } // if user has set custom labels to be added to all resources created by the controller // merge it with the controller's own default labels. Labels defined in `ExternalSecretsManager` - // Spec will have the lowest priority, followed by the labels in `ExternalSecrets` Spec and + // Spec will have the lowest priority, followed by the labels in `ExternalSecretsConfig` Spec and // controllerDefaultResourceLabels will have the highest priority. resourceLabels := make(map[string]string) if !common.IsESMSpecEmpty(r.esm) && r.esm.Spec.GlobalConfig != nil { @@ -39,10 +39,10 @@ func (r *Reconciler) reconcileExternalSecretsDeployment(es *operatorv1alpha1.Ext resourceLabels[k] = v } } - if es.Spec.ControllerConfig != nil && len(es.Spec.ControllerConfig.Labels) != 0 { - for k, v := range es.Spec.ControllerConfig.Labels { + if len(esc.Spec.ControllerConfig.Labels) != 0 { + for k, v := range esc.Spec.ControllerConfig.Labels { if disallowedLabelMatcher.MatchString(k) { - r.log.V(1).Info("skip adding unallowed label configured in externalsecrets.operator.openshift.io", "label", k, "value", v) + r.log.V(1).Info("skip adding unallowed label configured in externalsecretsconfig.operator.openshift.io", "label", k, "value", v) continue } resourceLabels[k] = v @@ -52,59 +52,60 @@ func (r *Reconciler) reconcileExternalSecretsDeployment(es *operatorv1alpha1.Ext resourceLabels[k] = v } - if err := r.createOrApplyNamespace(es, resourceLabels); err != nil { + if err := r.createOrApplyNamespace(esc, resourceLabels); err != nil { r.log.Error(err, "failed to create namespace") + return err } - if err := r.createOrApplyServiceAccounts(es, resourceLabels, recon); err != nil { + if err := r.createOrApplyServiceAccounts(esc, resourceLabels, recon); err != nil { r.log.Error(err, "failed to reconcile serviceaccount resource") return err } - if err := r.createOrApplyCertificates(es, resourceLabels, recon); err != nil { + if err := r.createOrApplyCertificates(esc, resourceLabels, recon); err != nil { r.log.Error(err, "failed to reconcile certificates resource") return err } - if err := r.createOrApplySecret(es, resourceLabels, recon); err != nil { + if err := r.createOrApplySecret(esc, resourceLabels, recon); err != nil { r.log.Error(err, "failed to reconcile secret resource") return err } - if err := r.createOrApplyRBACResource(es, resourceLabels, recon); err != nil { + if err := r.createOrApplyRBACResource(esc, resourceLabels, recon); err != nil { r.log.Error(err, "failed to 
reconcile rbac resources") return err } - if err := r.createOrApplyServices(es, resourceLabels, recon); err != nil { + if err := r.createOrApplyServices(esc, resourceLabels, recon); err != nil { r.log.Error(err, "failed to reconcile service resource") return err } - if err := r.createOrApplyDeployments(es, resourceLabels, recon); err != nil { + if err := r.createOrApplyDeployments(esc, resourceLabels, recon); err != nil { r.log.Error(err, "failed to reconcile deployment resource") return err } - if err := r.createOrApplyValidatingWebhookConfiguration(es, resourceLabels, recon); err != nil { + if err := r.createOrApplyValidatingWebhookConfiguration(esc, resourceLabels, recon); err != nil { r.log.Error(err, "failed to reconcile validating webhook resource") return err } - if addProcessedAnnotation(es) { - if err := r.UpdateWithRetry(r.ctx, es); err != nil { - return fmt.Errorf("failed to update processed annotation to %s: %w", es.GetName(), err) + if addProcessedAnnotation(esc) { + if err := r.UpdateWithRetry(r.ctx, esc); err != nil { + return fmt.Errorf("failed to update processed annotation to %s: %w", esc.GetName(), err) } } - r.log.V(4).Info("finished reconciliation of external-secrets", "namespace", es.GetNamespace(), "name", es.GetName()) + r.log.V(4).Info("finished reconciliation of external-secrets", "namespace", esc.GetNamespace(), "name", esc.GetName()) return nil } // createOrApplyNamespace is for the creating the namespace in which the `external-secrets` // resources will be created. -func (r *Reconciler) createOrApplyNamespace(es *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string) error { - namespace := getNamespace(es) +func (r *Reconciler) createOrApplyNamespace(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string) error { + namespace := getNamespace(esc) obj := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: namespace, diff --git a/pkg/controller/external_secrets/rbacs.go b/pkg/controller/external_secrets/rbacs.go index 15b9439f..8b47c582 100644 --- a/pkg/controller/external_secrets/rbacs.go +++ b/pkg/controller/external_secrets/rbacs.go @@ -21,15 +21,15 @@ const ( // createOrApplyRBACResource is for creating all the RBAC specific resources // required for installing external-secrets operand. 
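reconcileExternalSecretsDeployment merges labels from three sources with increasing priority: `ExternalSecretsManager` global labels, then `ExternalSecretsConfig` controller labels (filtered through `disallowedLabelMatcher`), then the controller's own default labels. A standalone sketch of that merge order follows; the regexp is copied from the hunk above, the maps and the `mergeResourceLabels` name are illustrative, and whether the ESM labels also pass through the filter is not visible in this hunk.

```go
package main

import (
	"fmt"
	"regexp"
)

// Copied from the hunk above: label keys the controller refuses to propagate.
var disallowedLabelMatcher = regexp.MustCompile(`^app.kubernetes.io\/|^external-secrets.io\/|^rbac.authorization.k8s.io\/|^servicebinding.io\/controller$|^app$`)

// mergeResourceLabels applies maps in increasing priority: later maps win, and
// disallowed keys are dropped from the user-supplied ExternalSecretsConfig labels.
func mergeResourceLabels(esmGlobal, escControllerConfig, controllerDefaults map[string]string) map[string]string {
	out := map[string]string{}
	for k, v := range esmGlobal { // lowest priority
		out[k] = v
	}
	for k, v := range escControllerConfig { // overrides ESM globals
		if disallowedLabelMatcher.MatchString(k) {
			continue // mirrors the "skip adding unallowed label" log branch
		}
		out[k] = v
	}
	for k, v := range controllerDefaults { // highest priority
		out[k] = v
	}
	return out
}

func main() {
	labels := mergeResourceLabels(
		map[string]string{"team": "platform"},
		map[string]string{"team": "secrets", "app": "ignored"},
		map[string]string{"app.kubernetes.io/managed-by": "external-secrets-operator"},
	)
	fmt.Println(labels) // map[app.kubernetes.io/managed-by:external-secrets-operator team:secrets]
}
```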
-func (r *Reconciler) createOrApplyRBACResource(es *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string, recon bool) error { +func (r *Reconciler) createOrApplyRBACResource(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string, recon bool) error { serviceAccountName := common.DecodeServiceAccountObjBytes(assets.MustAsset(controllerServiceAccountAssetName)).GetName() - if err := r.createOrApplyControllerRBACResources(es, serviceAccountName, resourceLabels, recon); err != nil { + if err := r.createOrApplyControllerRBACResources(esc, serviceAccountName, resourceLabels, recon); err != nil { r.log.Error(err, "failed to reconcile controller rbac resources") return err } - if err := r.createOrApplyCertControllerRBACResources(es, serviceAccountName, resourceLabels, recon); err != nil { + if err := r.createOrApplyCertControllerRBACResources(esc, serviceAccountName, resourceLabels, recon); err != nil { r.log.Error(err, "failed to reconcile cert-controller rbac resources") return err } @@ -39,7 +39,7 @@ func (r *Reconciler) createOrApplyRBACResource(es *operatorv1alpha1.ExternalSecr // createOrApplyControllerRBACResources is for creating all RBAC resources required by // the main external-secrets operand controller. -func (r *Reconciler) createOrApplyControllerRBACResources(es *operatorv1alpha1.ExternalSecrets, serviceAccountName string, resourceLabels map[string]string, recon bool) error { +func (r *Reconciler) createOrApplyControllerRBACResources(esc *operatorv1alpha1.ExternalSecretsConfig, serviceAccountName string, resourceLabels map[string]string, recon bool) error { for _, asset := range []string{ controllerClusterRoleAssetName, controllerClusterRoleEditAssetName, @@ -47,27 +47,27 @@ func (r *Reconciler) createOrApplyControllerRBACResources(es *operatorv1alpha1.E controllerClusterRoleViewAssetName, } { clusterRoleObj := r.getClusterRoleObject(asset, resourceLabels) - if err := r.createOrApplyClusterRole(es, clusterRoleObj, recon); err != nil { + if err := r.createOrApplyClusterRole(esc, clusterRoleObj, recon); err != nil { r.log.Error(err, "failed to reconcile controller clusterrole resources") return err } } clusterRoleName := common.DecodeClusterRoleObjBytes(assets.MustAsset(controllerClusterRoleAssetName)).GetName() - clusterRoleBindingObj := r.getClusterRoleBindingObject(es, controllerClusterRoleBindingAssetName, clusterRoleName, serviceAccountName, resourceLabels) - if err := r.createOrApplyClusterRoleBinding(es, clusterRoleBindingObj, recon); err != nil { + clusterRoleBindingObj := r.getClusterRoleBindingObject(esc, controllerClusterRoleBindingAssetName, clusterRoleName, serviceAccountName, resourceLabels) + if err := r.createOrApplyClusterRoleBinding(esc, clusterRoleBindingObj, recon); err != nil { r.log.Error(err, "failed to reconcile controller clusterrolebinding resources") return err } - roleObj := r.getRoleObject(es, controllerRoleLeaderElectionAssetName, resourceLabels) - if err := r.createOrApplyRole(es, roleObj, recon); err != nil { + roleObj := r.getRoleObject(esc, controllerRoleLeaderElectionAssetName, resourceLabels) + if err := r.createOrApplyRole(esc, roleObj, recon); err != nil { r.log.Error(err, "failed to reconcile controller role resources") return err } - roleBindingObj := r.getRoleBindingObject(es, controllerRoleBindingLeaderElectionAssetName, roleObj.GetName(), serviceAccountName, resourceLabels) - if err := r.createOrApplyRoleBinding(es, roleBindingObj, recon); err != nil { + roleBindingObj := r.getRoleBindingObject(esc, 
controllerRoleBindingLeaderElectionAssetName, roleObj.GetName(), serviceAccountName, resourceLabels) + if err := r.createOrApplyRoleBinding(esc, roleBindingObj, recon); err != nil { r.log.Error(err, "failed to reconcile controller rolebinding resources") return err } @@ -77,8 +77,8 @@ func (r *Reconciler) createOrApplyControllerRBACResources(es *operatorv1alpha1.E // createOrApplyCertControllerRBACResources is for creating all RBAC resources required by // the main external-secrets operand cert-controller. -func (r *Reconciler) createOrApplyCertControllerRBACResources(es *operatorv1alpha1.ExternalSecrets, serviceAccountName string, resourceLabels map[string]string, recon bool) error { - if isCertManagerConfigEnabled(es) { +func (r *Reconciler) createOrApplyCertControllerRBACResources(esc *operatorv1alpha1.ExternalSecretsConfig, serviceAccountName string, resourceLabels map[string]string, recon bool) error { + if isCertManagerConfigEnabled(esc) { r.log.V(4).Info("deleting cert-controller rbac resources if exists, as cert-manager config is enabled") for asset, assetType := range map[string]client.Object{ certControllerClusterRoleAssetName: &rbacv1.ClusterRole{}, @@ -92,13 +92,13 @@ func (r *Reconciler) createOrApplyCertControllerRBACResources(es *operatorv1alph } clusterRoleObj := r.getClusterRoleObject(certControllerClusterRoleAssetName, resourceLabels) - if err := r.createOrApplyClusterRole(es, clusterRoleObj, recon); err != nil { + if err := r.createOrApplyClusterRole(esc, clusterRoleObj, recon); err != nil { r.log.Error(err, "failed to reconcile cert-controller clusterrole resources") return err } - clusterRoleBindingObj := r.getClusterRoleBindingObject(es, certControllerClusterRoleBindingAssetName, clusterRoleObj.GetName(), serviceAccountName, resourceLabels) - if err := r.createOrApplyClusterRoleBinding(es, clusterRoleBindingObj, recon); err != nil { + clusterRoleBindingObj := r.getClusterRoleBindingObject(esc, certControllerClusterRoleBindingAssetName, clusterRoleObj.GetName(), serviceAccountName, resourceLabels) + if err := r.createOrApplyClusterRoleBinding(esc, clusterRoleBindingObj, recon); err != nil { r.log.Error(err, "failed to reconcile cert-controller clusterrolebinding resources") return err } @@ -107,7 +107,7 @@ func (r *Reconciler) createOrApplyCertControllerRBACResources(es *operatorv1alph } // createOrApplyClusterRole creates or updates given ClusterRole object. 
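createOrApplyClusterRole below, and the ClusterRoleBinding, Role, and RoleBinding variants after it, all share one fetch, create-if-missing, update-if-drifted flow. A condensed sketch of that flow against a controller-runtime client is shown here; `hasChanged` stands in for `common.HasObjectChanged`, whose implementation is not part of this patch.

```go
package reconcile

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createOrApply captures the shared flow used by the createOrApply* helpers:
// fetch the current object, create it when absent, and update it when it has
// drifted from the desired state.
func createOrApply(ctx context.Context, c client.Client, desired, fetched client.Object,
	hasChanged func(desired, fetched client.Object) bool) error {
	key := client.ObjectKeyFromObject(desired)
	err := c.Get(ctx, key, fetched)
	switch {
	case apierrors.IsNotFound(err):
		// Object does not exist yet: create it as defined by the static asset.
		if err := c.Create(ctx, desired); err != nil {
			return fmt.Errorf("failed to create %s: %w", key, err)
		}
		return nil
	case err != nil:
		return fmt.Errorf("failed to check %s: %w", key, err)
	}
	if hasChanged(desired, fetched) {
		// Object drifted from the asset-defined state: update back to desired.
		if err := c.Update(ctx, desired); err != nil {
			return fmt.Errorf("failed to update %s: %w", key, err)
		}
	}
	return nil
}
```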
-func (r *Reconciler) createOrApplyClusterRole(es *operatorv1alpha1.ExternalSecrets, obj *rbacv1.ClusterRole, recon bool) error { +func (r *Reconciler) createOrApplyClusterRole(esc *operatorv1alpha1.ExternalSecretsConfig, obj *rbacv1.ClusterRole, recon bool) error { var ( exist bool err error @@ -125,14 +125,14 @@ func (r *Reconciler) createOrApplyClusterRole(es *operatorv1alpha1.ExternalSecre } if exist && recon { - r.eventRecorder.Eventf(es, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s clusterrole resource already exists, maybe from previous installation", clusterRoleName) + r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s clusterrole resource already exists, maybe from previous installation", clusterRoleName) } if exist && common.HasObjectChanged(obj, fetched) { r.log.V(1).Info("clusterrole has been modified, updating to desired state", "name", clusterRoleName) if err := r.UpdateWithRetry(r.ctx, obj); err != nil { return common.FromClientError(err, "failed to update %s clusterrole resource", clusterRoleName) } - r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "clusterrole resource %s reconciled back to desired state", clusterRoleName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "clusterrole resource %s reconciled back to desired state", clusterRoleName) } else { r.log.V(4).Info("clusterrole resource already exists and is in expected state", "name", clusterRoleName) } @@ -140,7 +140,7 @@ func (r *Reconciler) createOrApplyClusterRole(es *operatorv1alpha1.ExternalSecre if err := r.Create(r.ctx, obj); err != nil { return common.FromClientError(err, "failed to create %s clusterrole resource", clusterRoleName) } - r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "clusterrole resource %s created", clusterRoleName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "clusterrole resource %s created", clusterRoleName) } return nil @@ -155,7 +155,7 @@ func (r *Reconciler) getClusterRoleObject(assetName string, resourceLabels map[s } // createOrApplyClusterRoleBinding creates or updates given ClusterRoleBinding object. 
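Each helper also reports through the Kubernetes event recorder: a Warning when a resource already exists during the initial reconcile pass, and a Normal event when a resource is created or corrected back to the desired state. A small sketch of that reporting with client-go's `record.EventRecorder` follows; the reason strings mirror the hunks above, while the function name and flags are illustrative.

```go
package reconcile

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/tools/record"
)

// reportReconcileEvents mirrors the event semantics used above: a pre-existing
// resource during the initial reconcile raises a Warning, while a create or a
// drift correction raises a Normal "Reconciled" event.
func reportReconcileEvents(rec record.EventRecorder, owner runtime.Object, name string,
	existed, updated, initialReconcile bool) {
	if existed && initialReconcile {
		rec.Eventf(owner, corev1.EventTypeWarning, "ResourceAlreadyExists",
			"%s resource already exists, maybe from previous installation", name)
	}
	if !existed || updated {
		rec.Eventf(owner, corev1.EventTypeNormal, "Reconciled",
			"resource %s reconciled to desired state", name)
	}
}
```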
-func (r *Reconciler) createOrApplyClusterRoleBinding(es *operatorv1alpha1.ExternalSecrets, obj *rbacv1.ClusterRoleBinding, recon bool) error { +func (r *Reconciler) createOrApplyClusterRoleBinding(esc *operatorv1alpha1.ExternalSecretsConfig, obj *rbacv1.ClusterRoleBinding, recon bool) error { var ( exist bool err error @@ -173,14 +173,14 @@ func (r *Reconciler) createOrApplyClusterRoleBinding(es *operatorv1alpha1.Extern } if exist && recon { - r.eventRecorder.Eventf(es, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s clusterrolebinding resource already exists, maybe from previous installation", clusterRoleBindingName) + r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s clusterrolebinding resource already exists, maybe from previous installation", clusterRoleBindingName) } if exist && common.HasObjectChanged(obj, fetched) { r.log.V(1).Info("clusterrolebinding has been modified, updating to desired state", "name", clusterRoleBindingName) if err := r.UpdateWithRetry(r.ctx, obj); err != nil { return common.FromClientError(err, "failed to update %s clusterrolebinding resource", clusterRoleBindingName) } - r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "clusterrolebinding resource %s reconciled back to desired state", clusterRoleBindingName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "clusterrolebinding resource %s reconciled back to desired state", clusterRoleBindingName) } else { r.log.V(4).Info("clusterrolebinding resource already exists and is in expected state", "name", clusterRoleBindingName) } @@ -188,7 +188,7 @@ func (r *Reconciler) createOrApplyClusterRoleBinding(es *operatorv1alpha1.Extern if err := r.Create(r.ctx, obj); err != nil { return common.FromClientError(err, "failed to create %s clusterrolebinding resource", clusterRoleBindingName) } - r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "clusterrolebinding resource %s created", clusterRoleBindingName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "clusterrolebinding resource %s created", clusterRoleBindingName) } return nil @@ -196,16 +196,16 @@ func (r *Reconciler) createOrApplyClusterRoleBinding(es *operatorv1alpha1.Extern // getClusterRoleBindingObject is for obtaining the content of given ClusterRoleBinding static asset, and // then updating it with desired values. -func (r *Reconciler) getClusterRoleBindingObject(es *operatorv1alpha1.ExternalSecrets, assetName, clusterRoleName, serviceAccountName string, resourceLabels map[string]string) *rbacv1.ClusterRoleBinding { +func (r *Reconciler) getClusterRoleBindingObject(esc *operatorv1alpha1.ExternalSecretsConfig, assetName, clusterRoleName, serviceAccountName string, resourceLabels map[string]string) *rbacv1.ClusterRoleBinding { clusterRoleBinding := common.DecodeClusterRoleBindingObjBytes(assets.MustAsset(assetName)) clusterRoleBinding.RoleRef.Name = clusterRoleName common.UpdateResourceLabels(clusterRoleBinding, resourceLabels) - updateServiceAccountNamespaceInRBACBindingObject[*rbacv1.ClusterRoleBinding](clusterRoleBinding, serviceAccountName, getNamespace(es)) + updateServiceAccountNamespaceInRBACBindingObject[*rbacv1.ClusterRoleBinding](clusterRoleBinding, serviceAccountName, getNamespace(esc)) return clusterRoleBinding } // createOrApplyRole creates or updates given Role object. 
-func (r *Reconciler) createOrApplyRole(es *operatorv1alpha1.ExternalSecrets, obj *rbacv1.Role, recon bool) error { +func (r *Reconciler) createOrApplyRole(esc *operatorv1alpha1.ExternalSecretsConfig, obj *rbacv1.Role, recon bool) error { roleName := fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName()) r.log.V(4).Info("reconciling role resource", "name", roleName) fetched := &rbacv1.Role{} @@ -219,14 +219,14 @@ func (r *Reconciler) createOrApplyRole(es *operatorv1alpha1.ExternalSecrets, obj } if exist && recon { - r.eventRecorder.Eventf(es, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s role resource already exists, maybe from previous installation", roleName) + r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s role resource already exists, maybe from previous installation", roleName) } if exist && common.HasObjectChanged(obj, fetched) { r.log.V(1).Info("role has been modified, updating to desired state", "name", roleName) if err := r.UpdateWithRetry(r.ctx, obj); err != nil { return common.FromClientError(err, "failed to update %s role resource", roleName) } - r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "role resource %s reconciled back to desired state", roleName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "role resource %s reconciled back to desired state", roleName) } else { r.log.V(4).Info("role resource already exists and is in expected state", "name", roleName) } @@ -234,7 +234,7 @@ func (r *Reconciler) createOrApplyRole(es *operatorv1alpha1.ExternalSecrets, obj if err := r.Create(r.ctx, obj); err != nil { return common.FromClientError(err, "failed to create %s role resource", roleName) } - r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "role resource %s created", roleName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "role resource %s created", roleName) } return nil @@ -242,15 +242,15 @@ func (r *Reconciler) createOrApplyRole(es *operatorv1alpha1.ExternalSecrets, obj // getRoleObject is for obtaining the content of given Role static asset, and // then updating it with desired values. -func (r *Reconciler) getRoleObject(es *operatorv1alpha1.ExternalSecrets, assetName string, resourceLabels map[string]string) *rbacv1.Role { +func (r *Reconciler) getRoleObject(esc *operatorv1alpha1.ExternalSecretsConfig, assetName string, resourceLabels map[string]string) *rbacv1.Role { role := common.DecodeRoleObjBytes(assets.MustAsset(assetName)) - updateNamespace(role, es) + updateNamespace(role, esc) common.UpdateResourceLabels(role, resourceLabels) return role } // createOrApplyRoleBinding creates or updates given RoleBinding object. 
-func (r *Reconciler) createOrApplyRoleBinding(es *operatorv1alpha1.ExternalSecrets, obj *rbacv1.RoleBinding, recon bool) error { +func (r *Reconciler) createOrApplyRoleBinding(esc *operatorv1alpha1.ExternalSecretsConfig, obj *rbacv1.RoleBinding, recon bool) error { roleBindingName := fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName()) r.log.V(4).Info("reconciling rolebinding resource", "name", roleBindingName) fetched := &rbacv1.RoleBinding{} @@ -264,14 +264,14 @@ func (r *Reconciler) createOrApplyRoleBinding(es *operatorv1alpha1.ExternalSecre } if exist && recon { - r.eventRecorder.Eventf(es, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s rolebinding resource already exists, maybe from previous installation", roleBindingName) + r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s rolebinding resource already exists, maybe from previous installation", roleBindingName) } if exist && common.HasObjectChanged(obj, fetched) { r.log.V(1).Info("rolebinding has been modified, updating to desired state", "name", roleBindingName) if err := r.UpdateWithRetry(r.ctx, obj); err != nil { return common.FromClientError(err, "failed to update %s rolebinding resource", roleBindingName) } - r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "rolebinding resource %s reconciled back to desired state", roleBindingName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "rolebinding resource %s reconciled back to desired state", roleBindingName) } else { r.log.V(4).Info("rolebinding resource already exists and is in expected state", "name", roleBindingName) @@ -280,7 +280,7 @@ func (r *Reconciler) createOrApplyRoleBinding(es *operatorv1alpha1.ExternalSecre if err := r.Create(r.ctx, obj); err != nil { return common.FromClientError(err, "failed to create %s rolebinding resource", roleBindingName) } - r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "rolebinding resource %s created", roleBindingName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "rolebinding resource %s created", roleBindingName) } return nil @@ -288,10 +288,10 @@ func (r *Reconciler) createOrApplyRoleBinding(es *operatorv1alpha1.ExternalSecre // getRoleBindingObject is for obtaining the content of given RoleBinding static asset, and // then updating it with desired values. 
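getClusterRoleBindingObject above and getRoleBindingObject below both decode a static asset, rewire its RoleRef, and then point the ServiceAccount subject at the operand namespace through the generic `updateServiceAccountNamespaceInRBACBindingObject` helper, whose body is not included in this patch. One plausible shape for it, assuming it simply rewrites matching ServiceAccount subjects, is sketched here.

```go
package rbacutil

import (
	rbacv1 "k8s.io/api/rbac/v1"
)

// bindingObject abstracts ClusterRoleBinding and RoleBinding for the generic
// helper below; both expose their Subjects slice the same way.
type bindingObject interface {
	*rbacv1.ClusterRoleBinding | *rbacv1.RoleBinding
}

// updateServiceAccountNamespace is a guess at the shape of
// updateServiceAccountNamespaceInRBACBindingObject used above: for every
// ServiceAccount subject matching serviceAccountName, set its namespace.
func updateServiceAccountNamespace[T bindingObject](obj T, serviceAccountName, namespace string) {
	var subjects []rbacv1.Subject
	switch b := any(obj).(type) {
	case *rbacv1.ClusterRoleBinding:
		subjects = b.Subjects
	case *rbacv1.RoleBinding:
		subjects = b.Subjects
	}
	for i := range subjects {
		if subjects[i].Kind == rbacv1.ServiceAccountKind && subjects[i].Name == serviceAccountName {
			subjects[i].Namespace = namespace
		}
	}
}
```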
-func (r *Reconciler) getRoleBindingObject(es *operatorv1alpha1.ExternalSecrets, assetName, roleName, serviceAccountName string, resourceLabels map[string]string) *rbacv1.RoleBinding { +func (r *Reconciler) getRoleBindingObject(esc *operatorv1alpha1.ExternalSecretsConfig, assetName, roleName, serviceAccountName string, resourceLabels map[string]string) *rbacv1.RoleBinding { roleBinding := common.DecodeRoleBindingObjBytes(assets.MustAsset(assetName)) roleBinding.RoleRef.Name = roleName - updateNamespace(roleBinding, es) + updateNamespace(roleBinding, esc) common.UpdateResourceLabels(roleBinding, resourceLabels) updateServiceAccountNamespaceInRBACBindingObject[*rbacv1.RoleBinding](roleBinding, serviceAccountName, roleBinding.GetNamespace()) return roleBinding diff --git a/pkg/controller/external_secrets/rbacs_test.go b/pkg/controller/external_secrets/rbacs_test.go index b35364bb..83451a4f 100644 --- a/pkg/controller/external_secrets/rbacs_test.go +++ b/pkg/controller/external_secrets/rbacs_test.go @@ -15,10 +15,10 @@ import ( func TestCreateOrApplyRBACResource(t *testing.T) { tests := []struct { - name string - preReq func(*Reconciler, *fakes.FakeCtrlClient) - updateExternalSecretsObj func(*operatorv1alpha1.ExternalSecrets) - wantErr string + name string + preReq func(*Reconciler, *fakes.FakeCtrlClient) + updateExternalSecretsConfig func(*operatorv1alpha1.ExternalSecretsConfig) + wantErr string }{ { name: "clusterrole reconciliation fails while checking if exists", @@ -31,9 +31,9 @@ func TestCreateOrApplyRBACResource(t *testing.T) { return true, nil }) }, - updateExternalSecretsObj: func(es *operatorv1alpha1.ExternalSecrets) { - es.Spec.ControllerConfig = &operatorv1alpha1.ControllerConfig{ - Namespace: "test-external-secrets", + updateExternalSecretsConfig: func(esc *operatorv1alpha1.ExternalSecretsConfig) { + esc.Spec = operatorv1alpha1.ExternalSecretsConfigSpec{ + ControllerConfig: operatorv1alpha1.ControllerConfig{}, } }, wantErr: `failed to check external-secrets-controller clusterrole resource already exists: test client error`, @@ -265,20 +265,28 @@ func TestCreateOrApplyRBACResource(t *testing.T) { }, { name: "clusterroles creation successful", - updateExternalSecretsObj: func(es *operatorv1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = &operatorv1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &operatorv1alpha1.CertManagerConfig{ - Enabled: "true", + updateExternalSecretsConfig: func(esc *operatorv1alpha1.ExternalSecretsConfig) { + esc.Spec = operatorv1alpha1.ExternalSecretsConfigSpec{ + ControllerConfig: operatorv1alpha1.ControllerConfig{ + CertProvider: &operatorv1alpha1.CertProvidersConfig{ + CertManager: &operatorv1alpha1.CertManagerConfig{ + Mode: operatorv1alpha1.Enabled, + }, + }, }, } }, }, { name: "clusterrolebindings creation successful", - updateExternalSecretsObj: func(es *operatorv1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = &operatorv1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &operatorv1alpha1.CertManagerConfig{ - Enabled: "true", + updateExternalSecretsConfig: func(esc *operatorv1alpha1.ExternalSecretsConfig) { + esc.Spec = operatorv1alpha1.ExternalSecretsConfigSpec{ + ControllerConfig: operatorv1alpha1.ControllerConfig{ + CertProvider: &operatorv1alpha1.CertProvidersConfig{ + CertManager: &operatorv1alpha1.CertManagerConfig{ + Mode: operatorv1alpha1.Enabled, + }, + }, }, } }, @@ -304,13 +312,13 @@ func TestCreateOrApplyRBACResource(t *testing.T) { return commontest.TestClientError }) }, - updateExternalSecretsObj: func(es 
*operatorv1alpha1.ExternalSecrets) { - if es.Spec.ExternalSecretsConfig == nil { - es.Spec.ExternalSecretsConfig = &operatorv1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &operatorv1alpha1.CertManagerConfig{ - Enabled: "true", + updateExternalSecretsConfig: func(es *operatorv1alpha1.ExternalSecretsConfig) { + es.Spec.ControllerConfig = operatorv1alpha1.ControllerConfig{ + CertProvider: &operatorv1alpha1.CertProvidersConfig{ + CertManager: &operatorv1alpha1.CertManagerConfig{ + Mode: operatorv1alpha1.Enabled, }, - } + }, } }, wantErr: `failed to delete cert-controller rbac resource: test client error`, @@ -325,13 +333,13 @@ func TestCreateOrApplyRBACResource(t *testing.T) { return commontest.TestClientError }) }, - updateExternalSecretsObj: func(es *operatorv1alpha1.ExternalSecrets) { - if es.Spec.ExternalSecretsConfig == nil { - es.Spec.ExternalSecretsConfig = &operatorv1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &operatorv1alpha1.CertManagerConfig{ - Enabled: "true", + updateExternalSecretsConfig: func(es *operatorv1alpha1.ExternalSecretsConfig) { + es.Spec.ControllerConfig = operatorv1alpha1.ControllerConfig{ + CertProvider: &operatorv1alpha1.CertProvidersConfig{ + CertManager: &operatorv1alpha1.CertManagerConfig{ + Mode: operatorv1alpha1.Enabled, }, - } + }, } }, wantErr: ``, @@ -348,12 +356,12 @@ func TestCreateOrApplyRBACResource(t *testing.T) { } r.CtrlClient = mock - es := commontest.TestExternalSecrets() - if tt.updateExternalSecretsObj != nil { - tt.updateExternalSecretsObj(es) + esc := commontest.TestExternalSecretsConfig() + if tt.updateExternalSecretsConfig != nil { + tt.updateExternalSecretsConfig(esc) } - err := r.createOrApplyRBACResource(es, controllerDefaultResourceLabels, true) + err := r.createOrApplyRBACResource(esc, controllerDefaultResourceLabels, true) if (tt.wantErr != "" || err != nil) && (err == nil || err.Error() != tt.wantErr) { t.Errorf("createOrApplyRBACResource() err: %v, wantErr: %v", err, tt.wantErr) } diff --git a/pkg/controller/external_secrets/secret.go b/pkg/controller/external_secrets/secret.go index d3198c1d..136c55ca 100644 --- a/pkg/controller/external_secrets/secret.go +++ b/pkg/controller/external_secrets/secret.go @@ -11,9 +11,9 @@ import ( "github.com/openshift/external-secrets-operator/pkg/operator/assets" ) -func (r *Reconciler) createOrApplySecret(es *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string, recon bool) error { +func (r *Reconciler) createOrApplySecret(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string, recon bool) error { // secrets are only created if isCertManagerConfig is not enabled - if isCertManagerConfigEnabled(es) { + if isCertManagerConfigEnabled(esc) { r.log.V(4).Info("cert-manager config is enabled, deleting webhook component secret resource if exists") if err := common.DeleteObject(r.ctx, r.CtrlClient, &corev1.Secret{}, webhookTLSSecretAssetName); err != nil { return fmt.Errorf("failed to delete secret resource of webhook component: %w", err) @@ -21,7 +21,7 @@ func (r *Reconciler) createOrApplySecret(es *operatorv1alpha1.ExternalSecrets, r return nil } - desired, err := r.getSecretObject(es, resourceLabels) + desired, err := r.getSecretObject(esc, resourceLabels) if err != nil { return fmt.Errorf("failed to generate secret resource for creation: %w", err) } @@ -40,7 +40,7 @@ func (r *Reconciler) createOrApplySecret(es *operatorv1alpha1.ExternalSecrets, r } if exist && recon { - r.eventRecorder.Eventf(es, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s 
secret resource already exists, maybe from previous installation", secretName) + r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s secret resource already exists, maybe from previous installation", secretName) } if exist && common.ObjectMetadataModified(desired, fetched) { @@ -48,7 +48,7 @@ func (r *Reconciler) createOrApplySecret(es *operatorv1alpha1.ExternalSecrets, r if err := r.UpdateWithRetry(r.ctx, desired); err != nil { return common.FromClientError(err, "failed to update %s secret resource", secretName) } - r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "secret resource %s reconciled back to desired state", secretName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "secret resource %s reconciled back to desired state", secretName) } else { r.log.V(4).Info("secret resource already exists and is in expected state", "name", secretName) } @@ -57,16 +57,16 @@ func (r *Reconciler) createOrApplySecret(es *operatorv1alpha1.ExternalSecrets, r if err := r.Create(r.ctx, desired); err != nil { return common.FromClientError(err, "failed to create %s secret resource", secretName) } - r.eventRecorder.Eventf(es, corev1.EventTypeNormal, "Reconciled", "secret resource %s created", secretName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "secret resource %s created", secretName) } return nil } -func (r *Reconciler) getSecretObject(es *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string) (*corev1.Secret, error) { +func (r *Reconciler) getSecretObject(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string) (*corev1.Secret, error) { secret := common.DecodeSecretObjBytes(assets.MustAsset(webhookTLSSecretAssetName)) - updateNamespace(secret, es) + updateNamespace(secret, esc) common.UpdateResourceLabels(secret, resourceLabels) return secret, nil } diff --git a/pkg/controller/external_secrets/secret_test.go b/pkg/controller/external_secrets/secret_test.go index 354f21ef..03a19f63 100644 --- a/pkg/controller/external_secrets/secret_test.go +++ b/pkg/controller/external_secrets/secret_test.go @@ -22,47 +22,48 @@ func TestCreateOrApplySecret(t *testing.T) { tests := []struct { name string preReq func(*Reconciler, *fakes.FakeCtrlClient) - es func(*v1alpha1.ExternalSecrets) + esc func(*v1alpha1.ExternalSecretsConfig) wantErr string }{ { name: "external secret spec disabled", preReq: nil, - es: func(es *v1alpha1.ExternalSecrets) { - es.Spec = v1alpha1.ExternalSecretsSpec{} - }, - }, - { - name: "externalSecretConfig is nil", - preReq: nil, - es: func(es *v1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = nil + esc: func(esc *v1alpha1.ExternalSecretsConfig) { + esc.Spec = v1alpha1.ExternalSecretsConfigSpec{} }, }, { name: "webhook config is nil", preReq: nil, - es: func(es *v1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{ - WebhookConfig: nil, + esc: func(esc *v1alpha1.ExternalSecretsConfig) { + esc.Spec = v1alpha1.ExternalSecretsConfigSpec{ + ApplicationConfig: v1alpha1.ApplicationConfig{ + WebhookConfig: nil, + }, } }, }, { name: "webhook config is empty", preReq: nil, - es: func(es *v1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{ - WebhookConfig: &v1alpha1.WebhookConfig{}, + esc: func(esc *v1alpha1.ExternalSecretsConfig) { + esc.Spec = v1alpha1.ExternalSecretsConfigSpec{ + ApplicationConfig: v1alpha1.ApplicationConfig{ + WebhookConfig: &v1alpha1.WebhookConfig{}, + }, } }, }, { 
name: "cert manager config is nil", preReq: nil, - es: func(es *v1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{ - CertManagerConfig: nil, + esc: func(esc *v1alpha1.ExternalSecretsConfig) { + esc.Spec = v1alpha1.ExternalSecretsConfigSpec{ + ControllerConfig: v1alpha1.ControllerConfig{ + CertProvider: &v1alpha1.CertProvidersConfig{ + CertManager: nil, + }, + }, } }, }, @@ -198,10 +199,12 @@ func TestCreateOrApplySecret(t *testing.T) { return commontest.TestClientError }) }, - es: func(es *v1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = &v1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &v1alpha1.CertManagerConfig{ - Enabled: "true", + esc: func(esc *v1alpha1.ExternalSecretsConfig) { + esc.Spec.ControllerConfig = v1alpha1.ControllerConfig{ + CertProvider: &v1alpha1.CertProvidersConfig{ + CertManager: &v1alpha1.CertManagerConfig{ + Mode: v1alpha1.Enabled, + }, }, } }, @@ -217,12 +220,12 @@ func TestCreateOrApplySecret(t *testing.T) { tt.preReq(r, mock) } r.CtrlClient = mock - es := testExternalSecretsForSecrets() - if tt.es != nil { - tt.es(es) + esc := testExternalSecretsConfigForSecrets() + if tt.esc != nil { + tt.esc(esc) } - err := r.createOrApplySecret(es, controllerDefaultResourceLabels, false) + err := r.createOrApplySecret(esc, controllerDefaultResourceLabels, false) if (tt.wantErr != "" || err != nil) && (err == nil || err.Error() != tt.wantErr) { t.Errorf("createOrApplySecret() err: %v, wantErr: %v", err, tt.wantErr) } @@ -230,18 +233,18 @@ func TestCreateOrApplySecret(t *testing.T) { } } -func testExternalSecretsForSecrets() *v1alpha1.ExternalSecrets { - externalSecrets := commontest.TestExternalSecrets() +func testExternalSecretsConfigForSecrets() *v1alpha1.ExternalSecretsConfig { + esc := commontest.TestExternalSecretsConfig() - externalSecrets.Spec = v1alpha1.ExternalSecretsSpec{ - ControllerConfig: &v1alpha1.ControllerConfig{ - Namespace: commontest.TestExternalSecretsNamespace, - }, - ExternalSecretsConfig: &v1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &v1alpha1.CertManagerConfig{ - Enabled: "false", + esc.Spec = v1alpha1.ExternalSecretsConfigSpec{ + ControllerConfig: v1alpha1.ControllerConfig{ + CertProvider: &v1alpha1.CertProvidersConfig{ + CertManager: &v1alpha1.CertManagerConfig{ + Mode: v1alpha1.Disabled, + }, }, }, + ApplicationConfig: v1alpha1.ApplicationConfig{}, } - return externalSecrets + return esc } diff --git a/pkg/controller/external_secrets/service_test.go b/pkg/controller/external_secrets/service_test.go index a43fcac3..644b6ef0 100644 --- a/pkg/controller/external_secrets/service_test.go +++ b/pkg/controller/external_secrets/service_test.go @@ -15,10 +15,10 @@ import ( func TestCreateOrApplyServices(t *testing.T) { tests := []struct { - name string - preReq func(*Reconciler, *fakes.FakeCtrlClient) - updateExternalSecretsObj func(*operatorv1alpha1.ExternalSecrets) - wantErr string + name string + preReq func(*Reconciler, *fakes.FakeCtrlClient) + updateExternalSecretsConfig func(config *operatorv1alpha1.ExternalSecretsConfig) + wantErr string }{ { name: "service reconciliation successful", @@ -56,10 +56,12 @@ func TestCreateOrApplyServices(t *testing.T) { return nil }) }, - updateExternalSecretsObj: func(es *operatorv1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = &operatorv1alpha1.ExternalSecretsConfig{ - BitwardenSecretManagerProvider: &operatorv1alpha1.BitwardenSecretManagerProvider{ - Enabled: "true", + updateExternalSecretsConfig: func(esc 
*operatorv1alpha1.ExternalSecretsConfig) { + esc.Spec = operatorv1alpha1.ExternalSecretsConfigSpec{ + Plugins: operatorv1alpha1.PluginsConfig{ + BitwardenSecretManagerProvider: &operatorv1alpha1.BitwardenSecretManagerProvider{ + Mode: operatorv1alpha1.Enabled, + }, }, } }, @@ -122,11 +124,11 @@ func TestCreateOrApplyServices(t *testing.T) { tt.preReq(r, mock) } r.CtrlClient = mock - es := commontest.TestExternalSecrets() - if tt.updateExternalSecretsObj != nil { - tt.updateExternalSecretsObj(es) + esc := commontest.TestExternalSecretsConfig() + if tt.updateExternalSecretsConfig != nil { + tt.updateExternalSecretsConfig(esc) } - err := r.createOrApplyServices(es, controllerDefaultResourceLabels, false) + err := r.createOrApplyServices(esc, controllerDefaultResourceLabels, false) if (tt.wantErr != "" || err != nil) && (err == nil || err.Error() != tt.wantErr) { t.Errorf("createOrApplyServices() err: %v, wantErr: %v", err, tt.wantErr) } diff --git a/pkg/controller/external_secrets/serviceaccounts.go b/pkg/controller/external_secrets/serviceaccounts.go index a6b6ddcb..f9601b28 100644 --- a/pkg/controller/external_secrets/serviceaccounts.go +++ b/pkg/controller/external_secrets/serviceaccounts.go @@ -12,7 +12,7 @@ import ( ) // createOrApplyServiceAccounts ensures required service Account resources exist and are correctly configured. -func (r *Reconciler) createOrApplyServiceAccounts(externalsecrets *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string, externalsecretsCreateRecon bool) error { +func (r *Reconciler) createOrApplyServiceAccounts(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string, externalSecretsConfigCreateRecon bool) error { serviceAccountsToCreate := []struct { assetName string condition bool @@ -27,11 +27,11 @@ func (r *Reconciler) createOrApplyServiceAccounts(externalsecrets *operatorv1alp }, { assetName: certControllerServiceAccountAssetName, - condition: !isCertManagerConfigEnabled(externalsecrets), + condition: !isCertManagerConfigEnabled(esc), }, { assetName: bitwardenServiceAccountAssetName, - condition: isBitwardenConfigEnabled(externalsecrets), + condition: isBitwardenConfigEnabled(esc), }, } @@ -44,7 +44,7 @@ func (r *Reconciler) createOrApplyServiceAccounts(externalsecrets *operatorv1alp } desired := common.DecodeServiceAccountObjBytes(assets.MustAsset(serviceAccount.assetName)) - updateNamespace(desired, externalsecrets) + updateNamespace(desired, esc) common.UpdateResourceLabels(desired, resourceLabels) serviceAccountName := fmt.Sprintf("%s/%s", desired.GetNamespace(), desired.GetName()) @@ -62,15 +62,15 @@ func (r *Reconciler) createOrApplyServiceAccounts(externalsecrets *operatorv1alp } if exist { - if externalsecretsCreateRecon { - r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s serviceaccount already exists, possibly from a previous install", serviceAccountName) + if externalSecretsConfigCreateRecon { + r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s serviceaccount already exists, possibly from a previous install", serviceAccountName) } r.log.V(4).Info("serviceaccount exists", "name", serviceAccountName) } else { if err := r.Create(r.ctx, desired); err != nil { return common.FromClientError(err, "failed to create serviceaccount %s", serviceAccountName) } - r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeNormal, "Reconciled", "Created serviceaccount %s", serviceAccountName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, 
"Reconciled", "Created serviceaccount %s", serviceAccountName) } } diff --git a/pkg/controller/external_secrets/serviceaccounts_test.go b/pkg/controller/external_secrets/serviceaccounts_test.go index 7c80b370..02e52f31 100644 --- a/pkg/controller/external_secrets/serviceaccounts_test.go +++ b/pkg/controller/external_secrets/serviceaccounts_test.go @@ -27,10 +27,10 @@ func staticServiceAccounts() map[string]string { func TestCreateOrApplyServiceAccounts(t *testing.T) { tests := []struct { - name string - preReq func(*Reconciler, *fakes.FakeCtrlClient) - updateExternalSecretsObj func(*operatorv1alpha1.ExternalSecrets) - wantErr string + name string + preReq func(*Reconciler, *fakes.FakeCtrlClient) + updateExternalSecretsConfig func(*operatorv1alpha1.ExternalSecretsConfig) + wantErr string }{ { name: "all static serviceaccounts created successfully", @@ -72,10 +72,12 @@ func TestCreateOrApplyServiceAccounts(t *testing.T) { return nil }) }, - updateExternalSecretsObj: func(es *operatorv1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = &operatorv1alpha1.ExternalSecretsConfig{ - BitwardenSecretManagerProvider: &operatorv1alpha1.BitwardenSecretManagerProvider{ - Enabled: "true", + updateExternalSecretsConfig: func(esc *operatorv1alpha1.ExternalSecretsConfig) { + esc.Spec = operatorv1alpha1.ExternalSecretsConfigSpec{ + Plugins: operatorv1alpha1.PluginsConfig{ + BitwardenSecretManagerProvider: &operatorv1alpha1.BitwardenSecretManagerProvider{ + Mode: operatorv1alpha1.Enabled, + }, }, } }, @@ -93,10 +95,14 @@ func TestCreateOrApplyServiceAccounts(t *testing.T) { }) }, wantErr: "", // <- no error expected - updateExternalSecretsObj: func(es *operatorv1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = &operatorv1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &operatorv1alpha1.CertManagerConfig{ - Enabled: "true", + updateExternalSecretsConfig: func(esc *operatorv1alpha1.ExternalSecretsConfig) { + esc.Spec = operatorv1alpha1.ExternalSecretsConfigSpec{ + ControllerConfig: operatorv1alpha1.ControllerConfig{ + CertProvider: &operatorv1alpha1.CertProvidersConfig{ + CertManager: &operatorv1alpha1.CertManagerConfig{ + Mode: operatorv1alpha1.Enabled, + }, + }, }, } }, @@ -131,10 +137,12 @@ func TestCreateOrApplyServiceAccounts(t *testing.T) { return commontest.TestClientError }) }, - updateExternalSecretsObj: func(es *operatorv1alpha1.ExternalSecrets) { - es.Spec.ExternalSecretsConfig = &operatorv1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &operatorv1alpha1.CertManagerConfig{ - Enabled: "true", + updateExternalSecretsConfig: func(es *operatorv1alpha1.ExternalSecretsConfig) { + es.Spec.ControllerConfig = operatorv1alpha1.ControllerConfig{ + CertProvider: &operatorv1alpha1.CertProvidersConfig{ + CertManager: &operatorv1alpha1.CertManagerConfig{ + Mode: operatorv1alpha1.Enabled, + }, }, } }, @@ -151,12 +159,12 @@ func TestCreateOrApplyServiceAccounts(t *testing.T) { tt.preReq(r, mock) } - es := commontest.TestExternalSecrets() - if tt.updateExternalSecretsObj != nil { - tt.updateExternalSecretsObj(es) + esc := commontest.TestExternalSecretsConfig() + if tt.updateExternalSecretsConfig != nil { + tt.updateExternalSecretsConfig(esc) } - err := r.createOrApplyServiceAccounts(es, controllerDefaultResourceLabels, false) + err := r.createOrApplyServiceAccounts(esc, controllerDefaultResourceLabels, false) if tt.wantErr != "" { if err == nil || err.Error() != tt.wantErr { t.Errorf("Expected error: %v, got: %v", tt.wantErr, err) diff --git a/pkg/controller/external_secrets/services.go 
b/pkg/controller/external_secrets/services.go index 65a9d2b4..69b893bc 100644 --- a/pkg/controller/external_secrets/services.go +++ b/pkg/controller/external_secrets/services.go @@ -12,7 +12,7 @@ import ( ) // createOrApplyServices handles conditional and default creation of Services. -func (r *Reconciler) createOrApplyServices(externalsecrets *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string, externalsecretsCreateRecon bool) error { +func (r *Reconciler) createOrApplyServices(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string, externalSecretsConfigCreateRecon bool) error { servicesToCreate := []struct { assetName string condition bool @@ -27,11 +27,11 @@ func (r *Reconciler) createOrApplyServices(externalsecrets *operatorv1alpha1.Ext }, { assetName: certControllerMetricsServiceAssetName, - condition: !isCertManagerConfigEnabled(externalsecrets), + condition: !isCertManagerConfigEnabled(esc), }, { assetName: bitwardenServiceAssetName, - condition: isBitwardenConfigEnabled(externalsecrets), + condition: isBitwardenConfigEnabled(esc), }, } @@ -39,7 +39,7 @@ func (r *Reconciler) createOrApplyServices(externalsecrets *operatorv1alpha1.Ext if !service.condition { continue } - if err := r.createOrApplyServiceFromAsset(externalsecrets, service.assetName, resourceLabels, externalsecretsCreateRecon); err != nil { + if err := r.createOrApplyServiceFromAsset(esc, service.assetName, resourceLabels, externalSecretsConfigCreateRecon); err != nil { return err } } @@ -48,9 +48,9 @@ func (r *Reconciler) createOrApplyServices(externalsecrets *operatorv1alpha1.Ext } // createOrApplyServiceFromAsset decodes a Service YAML asset and ensures it exists in the cluster. -func (r *Reconciler) createOrApplyServiceFromAsset(externalsecrets *operatorv1alpha1.ExternalSecrets, assetName string, resourceLabels map[string]string, externalsecretsCreateRecon bool) error { +func (r *Reconciler) createOrApplyServiceFromAsset(esc *operatorv1alpha1.ExternalSecretsConfig, assetName string, resourceLabels map[string]string, externalSecretsConfigCreateRecon bool) error { service := common.DecodeServiceObjBytes(assets.MustAsset(assetName)) - updateNamespace(service, externalsecrets) + updateNamespace(service, esc) common.UpdateResourceLabels(service, resourceLabels) serviceName := fmt.Sprintf("%s/%s", service.GetNamespace(), service.GetName()) @@ -67,15 +67,15 @@ func (r *Reconciler) createOrApplyServiceFromAsset(externalsecrets *operatorv1al } if exists { - if externalsecretsCreateRecon { - r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s already exists", serviceName) + if externalSecretsConfigCreateRecon { + r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s already exists", serviceName) } if common.HasObjectChanged(service, fetched) { r.log.V(1).Info("Service modified, updating", "name", serviceName) if err := r.UpdateWithRetry(r.ctx, service); err != nil { return common.FromClientError(err, "failed to update service %s", serviceName) } - r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeNormal, "Reconciled", "Service %s updated", serviceName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "Service %s updated", serviceName) } else { r.log.V(4).Info("Service already up-to-date", "name", serviceName) } @@ -83,7 +83,7 @@ func (r *Reconciler) createOrApplyServiceFromAsset(externalsecrets *operatorv1al if err := r.Create(r.ctx, service); err != nil { return common.FromClientError(err, 
"failed to create service %s", serviceName) } - r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeNormal, "Reconciled", "Service %s created", serviceName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "Service %s created", serviceName) } return nil diff --git a/pkg/controller/external_secrets/utils.go b/pkg/controller/external_secrets/utils.go index f3885188..c7c38b35 100644 --- a/pkg/controller/external_secrets/utils.go +++ b/pkg/controller/external_secrets/utils.go @@ -15,39 +15,35 @@ import ( "github.com/openshift/external-secrets-operator/pkg/controller/common" ) -func getNamespace(es *operatorv1alpha1.ExternalSecrets) string { - ns := externalsecretsDefaultNamespace - if es.Spec.ControllerConfig != nil && es.Spec.ControllerConfig.Namespace != "" { - ns = es.Spec.ControllerConfig.Namespace - } - return ns +func getNamespace(_ *operatorv1alpha1.ExternalSecretsConfig) string { + return externalsecretsDefaultNamespace } -func updateNamespace(obj client.Object, es *operatorv1alpha1.ExternalSecrets) { - obj.SetNamespace(getNamespace(es)) +func updateNamespace(obj client.Object, esc *operatorv1alpha1.ExternalSecretsConfig) { + obj.SetNamespace(getNamespace(esc)) } -func containsProcessedAnnotation(externalsecrets *operatorv1alpha1.ExternalSecrets) bool { - _, exist := externalsecrets.GetAnnotations()[controllerProcessedAnnotation] +func containsProcessedAnnotation(esc *operatorv1alpha1.ExternalSecretsConfig) bool { + _, exist := esc.GetAnnotations()[controllerProcessedAnnotation] return exist } -func addProcessedAnnotation(externalsecrets *operatorv1alpha1.ExternalSecrets) bool { - annotations := externalsecrets.GetAnnotations() +func addProcessedAnnotation(esc *operatorv1alpha1.ExternalSecretsConfig) bool { + annotations := esc.GetAnnotations() if annotations == nil { annotations = make(map[string]string, 1) } if _, exist := annotations[controllerProcessedAnnotation]; !exist { annotations[controllerProcessedAnnotation] = "true" - externalsecrets.SetAnnotations(annotations) + esc.SetAnnotations(annotations) return true } return false } -func (r *Reconciler) updateCondition(externalsecrets *operatorv1alpha1.ExternalSecrets, prependErr error) error { - if err := r.updateStatus(r.ctx, externalsecrets); err != nil { - errUpdate := fmt.Errorf("failed to update %s/%s status: %w", externalsecrets.GetNamespace(), externalsecrets.GetName(), err) +func (r *Reconciler) updateCondition(esc *operatorv1alpha1.ExternalSecretsConfig, prependErr error) error { + if err := r.updateStatus(r.ctx, esc); err != nil { + errUpdate := fmt.Errorf("failed to update %s/%s status: %w", esc.GetNamespace(), esc.GetName(), err) if prependErr != nil { return utilerrors.NewAggregate([]error{err, errUpdate}) } @@ -56,19 +52,19 @@ func (r *Reconciler) updateCondition(externalsecrets *operatorv1alpha1.ExternalS return prependErr } -// updateStatus is for updating the status subresource of externalsecrets.openshift.operator.io. -func (r *Reconciler) updateStatus(ctx context.Context, changed *operatorv1alpha1.ExternalSecrets) error { +// updateStatus is for updating the status subresource of externalsecretsconfigs.operator.openshift.io. 
+func (r *Reconciler) updateStatus(ctx context.Context, changed *operatorv1alpha1.ExternalSecretsConfig) error { namespacedName := types.NamespacedName{Name: changed.Name, Namespace: changed.Namespace} if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - r.log.V(4).Info("updating externalsecrets.openshift.operator.io status", "request", namespacedName) - current := &operatorv1alpha1.ExternalSecrets{} + r.log.V(4).Info("updating externalsecretsconfigs.operator.openshift.io status", "request", namespacedName) + current := &operatorv1alpha1.ExternalSecretsConfig{} if err := r.Get(ctx, namespacedName, current); err != nil { - return fmt.Errorf("failed to fetch externalsecrets.openshift.operator.io %q for status update: %w", namespacedName, err) + return fmt.Errorf("failed to fetch externalsecretsconfigs.operator.openshift.io %q for status update: %w", namespacedName, err) } changed.Status.DeepCopyInto(¤t.Status) if err := r.StatusUpdate(ctx, current); err != nil { - return fmt.Errorf("failed to update externalsecrets.openshift.operator.io %q status: %w", namespacedName, err) + return fmt.Errorf("failed to update externalsecretsconfigs.operator.openshift.io %q status: %w", namespacedName, err) } return nil @@ -79,43 +75,42 @@ func (r *Reconciler) updateStatus(ctx context.Context, changed *operatorv1alpha1 return nil } -// validateExternalSecretsConfig is for validating the ExternalSecrets CR fields, apart from the +// validateExternalSecretsConfig is for validating the ExternalSecretsConfig CR fields, apart from the // CEL validations present in CRD. -func (r *Reconciler) validateExternalSecretsConfig(es *operatorv1alpha1.ExternalSecrets) error { - if isCertManagerConfigEnabled(es) { +func (r *Reconciler) validateExternalSecretsConfig(esc *operatorv1alpha1.ExternalSecretsConfig) error { + if isCertManagerConfigEnabled(esc) { if _, ok := r.optionalResourcesList[certificateCRDGKV]; !ok { - return fmt.Errorf("spec.externalSecretsConfig.webhookConfig.certManagerConfig.enabled is set, but cert-manager is not installed") + return fmt.Errorf("spec.certManagerConfig.enabled is set, but cert-manager is not installed") } - } return nil } -// isCertManagerConfigEnabled returns whether CertManagerConfig is enabled in ExternalSecrets CR Spec. -func isCertManagerConfigEnabled(es *operatorv1alpha1.ExternalSecrets) bool { - return es.Spec != (operatorv1alpha1.ExternalSecretsSpec{}) && es.Spec.ExternalSecretsConfig != nil && - es.Spec.ExternalSecretsConfig.CertManagerConfig != nil && - common.ParseBool(es.Spec.ExternalSecretsConfig.CertManagerConfig.Enabled) +// isCertManagerConfigEnabled returns whether CertManagerConfig is enabled in ExternalSecretsConfig CR Spec. +func isCertManagerConfigEnabled(esc *operatorv1alpha1.ExternalSecretsConfig) bool { + return esc.Spec.ControllerConfig.CertProvider != nil && + esc.Spec.ControllerConfig.CertProvider.CertManager != nil && + common.EvalMode(esc.Spec.ControllerConfig.CertProvider.CertManager.Mode) } -// isBitwardenConfigEnabled returns whether CertManagerConfig is enabled in ExternalSecrets CR Spec. -func isBitwardenConfigEnabled(es *operatorv1alpha1.ExternalSecrets) bool { - return es.Spec != (operatorv1alpha1.ExternalSecretsSpec{}) && es.Spec.ExternalSecretsConfig != nil && es.Spec.ExternalSecretsConfig.BitwardenSecretManagerProvider != nil && - common.ParseBool(es.Spec.ExternalSecretsConfig.BitwardenSecretManagerProvider.Enabled) +// isBitwardenConfigEnabled returns whether BitwardenSecretManagerProvider is enabled in ExternalSecretsConfig CR Spec. 
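The enablement helpers now walk `ControllerConfig.CertProvider.CertManager` and `Plugins.BitwardenSecretManagerProvider` and evaluate a `Mode` enum via `common.EvalMode`, instead of parsing string booleans. A minimal standalone sketch of that nil-guarded check follows; the `Mode` values and the `evalMode` logic are simplified assumptions, since `common.EvalMode` is not shown in this patch.

```go
package main

import "fmt"

// Mode mirrors the shape of the operator's enablement enum; the values are
// assumptions based on the v1alpha1.Enabled / v1alpha1.Disabled constants
// referenced in the tests above.
type Mode string

const (
	Enabled  Mode = "Enabled"
	Disabled Mode = "Disabled"
)

type CertManagerConfig struct{ Mode Mode }
type CertProvidersConfig struct{ CertManager *CertManagerConfig }
type ControllerConfig struct{ CertProvider *CertProvidersConfig }
type Spec struct{ ControllerConfig ControllerConfig }

// evalMode stands in for common.EvalMode: only an explicit Enabled counts.
func evalMode(m Mode) bool { return m == Enabled }

// isCertManagerConfigEnabled mirrors the nil-guarded walk in the helper above:
// every optional pointer is checked before the Mode is evaluated.
func isCertManagerConfigEnabled(spec Spec) bool {
	return spec.ControllerConfig.CertProvider != nil &&
		spec.ControllerConfig.CertProvider.CertManager != nil &&
		evalMode(spec.ControllerConfig.CertProvider.CertManager.Mode)
}

func main() {
	fmt.Println(isCertManagerConfigEnabled(Spec{})) // false: no CertProvider set
	fmt.Println(isCertManagerConfigEnabled(Spec{ControllerConfig: ControllerConfig{
		CertProvider: &CertProvidersConfig{CertManager: &CertManagerConfig{Mode: Enabled}},
	}})) // true
}
```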
+func isBitwardenConfigEnabled(esc *operatorv1alpha1.ExternalSecretsConfig) bool { + return esc.Spec.Plugins.BitwardenSecretManagerProvider != nil && + common.EvalMode(esc.Spec.Plugins.BitwardenSecretManagerProvider.Mode) } -func getLogLevel(config *operatorv1alpha1.ExternalSecretsConfig) string { - if config != nil { - return zapcore.Level(config.LogLevel).String() +func getLogLevel(config operatorv1alpha1.ExternalSecretsConfigSpec) string { + switch config.ApplicationConfig.LogLevel { + case 0, 1, 2: + return zapcore.Level(config.ApplicationConfig.LogLevel).String() + case 4, 5: + return zapcore.DebugLevel.String() } - return "info" + return zapcore.InfoLevel.String() } -func getOperatingNamespace(externalsecrets *operatorv1alpha1.ExternalSecrets) string { - if externalsecrets == nil || externalsecrets.Spec.ExternalSecretsConfig == nil { - return "" - } - return externalsecrets.Spec.ExternalSecretsConfig.OperatingNamespace +func getOperatingNamespace(esc *operatorv1alpha1.ExternalSecretsConfig) string { + return esc.Spec.ApplicationConfig.OperatingNamespace } func (r *Reconciler) IsCertManagerInstalled() bool { diff --git a/pkg/controller/external_secrets/validatingwebhook.go b/pkg/controller/external_secrets/validatingwebhook.go index 2eb7524b..729c15bf 100644 --- a/pkg/controller/external_secrets/validatingwebhook.go +++ b/pkg/controller/external_secrets/validatingwebhook.go @@ -12,8 +12,8 @@ import ( "github.com/openshift/external-secrets-operator/pkg/operator/assets" ) -func (r *Reconciler) createOrApplyValidatingWebhookConfiguration(externalsecrets *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string, recon bool) error { - desiredWebhooks, err := r.getValidatingWebhookObjects(externalsecrets, resourceLabels) +func (r *Reconciler) createOrApplyValidatingWebhookConfiguration(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string, recon bool) error { + desiredWebhooks, err := r.getValidatingWebhookObjects(esc, resourceLabels) if err != nil { return fmt.Errorf("failed to generate validatingWebhook resource for creation: %w", err) } @@ -31,14 +31,14 @@ func (r *Reconciler) createOrApplyValidatingWebhookConfiguration(externalsecrets } if exist && recon { - r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s validatingWebhook resource already exists, maybe from previous installation", validatingWebhookName) + r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "ResourceAlreadyExists", "%s validatingWebhook resource already exists, maybe from previous installation", validatingWebhookName) } if exist && common.HasObjectChanged(desired, fetched) { r.log.V(1).Info("validatingWebhook has been modified", "updating to desired state", "name", validatingWebhookName) if err := r.UpdateWithRetry(r.ctx, desired); err != nil { return common.FromClientError(err, "failed to update %s validatingWebhook resource with desired state", validatingWebhookName) } - r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeNormal, "Reconciled", "validatingWebhook resource %s reconciled back to desired state", validatingWebhookName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "validatingWebhook resource %s reconciled back to desired state", validatingWebhookName) } else { r.log.V(4).Info("validatingWebhook resource already exists and is in expected state", "name", validatingWebhookName) } @@ -47,14 +47,14 @@ func (r *Reconciler) createOrApplyValidatingWebhookConfiguration(externalsecrets if err := r.Create(r.ctx, 
desired); err != nil { return common.FromClientError(err, "failed to create validatingWebhook resource %s", validatingWebhookName) } - r.eventRecorder.Eventf(externalsecrets, corev1.EventTypeNormal, "Reconciled", "validatingWebhook resource %s created", validatingWebhookName) + r.eventRecorder.Eventf(esc, corev1.EventTypeNormal, "Reconciled", "validatingWebhook resource %s created", validatingWebhookName) } } return nil } -func (r *Reconciler) getValidatingWebhookObjects(externalsecrets *operatorv1alpha1.ExternalSecrets, resourceLabels map[string]string) ([]*webhook.ValidatingWebhookConfiguration, error) { +func (r *Reconciler) getValidatingWebhookObjects(esc *operatorv1alpha1.ExternalSecretsConfig, resourceLabels map[string]string) ([]*webhook.ValidatingWebhookConfiguration, error) { var webhooks []*webhook.ValidatingWebhookConfiguration for _, assetName := range []string{validatingWebhookExternalSecretCRDAssetName, validatingWebhookSecretStoreCRDAssetName} { @@ -62,8 +62,8 @@ func (r *Reconciler) getValidatingWebhookObjects(externalsecrets *operatorv1alph validatingWebhook := common.DecodeValidatingWebhookConfigurationObjBytes(assets.MustAsset(assetName)) common.UpdateResourceLabels(validatingWebhook, resourceLabels) - if err := updateValidatingWebhookAnnotation(externalsecrets, validatingWebhook); err != nil { - return nil, fmt.Errorf("failed to update validatingWebhook resource for %s external secrets: %s", externalsecrets.GetName(), err.Error()) + if err := updateValidatingWebhookAnnotation(esc, validatingWebhook); err != nil { + return nil, fmt.Errorf("failed to update validatingWebhook resource for %s external secrets: %s", esc.GetName(), err.Error()) } webhooks = append(webhooks, validatingWebhook) @@ -72,15 +72,18 @@ func (r *Reconciler) getValidatingWebhookObjects(externalsecrets *operatorv1alph return webhooks, nil } -func updateValidatingWebhookAnnotation(externalsecrets *operatorv1alpha1.ExternalSecrets, webhook *webhook.ValidatingWebhookConfiguration) error { - if externalsecrets != nil && - externalsecrets.Spec.ExternalSecretsConfig != nil && - externalsecrets.Spec.ExternalSecretsConfig.CertManagerConfig != nil { - if common.ParseBool(externalsecrets.Spec.ExternalSecretsConfig.CertManagerConfig.AddInjectorAnnotations) { - if webhook.Annotations == nil { - webhook.Annotations = map[string]string{} - } - webhook.Annotations[common.CertManagerInjectCAFromAnnotation] = common.CertManagerInjectCAFromAnnotationValue +func updateValidatingWebhookAnnotation(esc *operatorv1alpha1.ExternalSecretsConfig, webhook *webhook.ValidatingWebhookConfiguration) error { + if common.IsInjectCertManagerAnnotationEnabled(esc) { + if webhook.Annotations == nil { + webhook.Annotations = map[string]string{} + } + webhook.Annotations[common.CertManagerInjectCAFromAnnotation] = common.CertManagerInjectCAFromAnnotationValue + return nil + } + if webhook.Annotations != nil { + delete(webhook.Annotations, common.CertManagerInjectCAFromAnnotation) + if len(webhook.Annotations) == 0 { + webhook.Annotations = nil } } return nil diff --git a/pkg/controller/external_secrets/validatingwebhook_test.go b/pkg/controller/external_secrets/validatingwebhook_test.go index d30682bc..be17bfbd 100644 --- a/pkg/controller/external_secrets/validatingwebhook_test.go +++ b/pkg/controller/external_secrets/validatingwebhook_test.go @@ -116,14 +116,16 @@ func TestCreateOrApplyValidatingWebhookConfiguration(t *testing.T) { } } -func testExternalSecretsForValidateWebhookConfiguration() *v1alpha1.ExternalSecrets { - externalSecrets 
:= commontest.TestExternalSecrets() - externalSecrets.Spec = v1alpha1.ExternalSecretsSpec{ - ExternalSecretsConfig: &v1alpha1.ExternalSecretsConfig{ - CertManagerConfig: &v1alpha1.CertManagerConfig{ - AddInjectorAnnotations: "true", +func testExternalSecretsForValidateWebhookConfiguration() *v1alpha1.ExternalSecretsConfig { + esc := commontest.TestExternalSecretsConfig() + esc.Spec = v1alpha1.ExternalSecretsConfigSpec{ + ControllerConfig: v1alpha1.ControllerConfig{ + CertProvider: &v1alpha1.CertProvidersConfig{ + CertManager: &v1alpha1.CertManagerConfig{ + InjectAnnotations: "true", + }, }, }, } - return externalSecrets + return esc } diff --git a/pkg/controller/external_secrets_manager/controller.go b/pkg/controller/external_secrets_manager/controller.go index b83c1ae6..5422c6ea 100644 --- a/pkg/controller/external_secrets_manager/controller.go +++ b/pkg/controller/external_secrets_manager/controller.go @@ -45,23 +45,23 @@ import ( const ( ControllerName = "external-secrets-manager" - // finalizer name for external-secrets.openshift.operator.io resource. - finalizer = "externalsecretsmanager.openshift.operator.io/" + ControllerName + // finalizer name for externalsecretsconfigs.operator.openshift.io resource. + finalizer = "externalsecretsmanagers.operator.openshift.io/" + ControllerName ) var ( - externalSecretsControllerId = fmt.Sprintf("externalsecrets.%s/%s", operatorv1alpha1.GroupVersion.Group, operatorv1alpha1.GroupVersion.Version) + externalSecretsControllerId = fmt.Sprintf("externalsecretsconfigs.%s/%s", operatorv1alpha1.GroupVersion.Group, operatorv1alpha1.GroupVersion.Version) ) -// Reconciler reconciles externalsecretsmanager.openshift.operator.io CR. +// Reconciler reconciles externalsecretsmanagers.operator.openshift.io CR. type Reconciler struct { operatorclient.CtrlClient - Scheme *runtime.Scheme - ctx context.Context - eventRecorder record.EventRecorder - log logr.Logger - now *common.Now - externalSecrets *operatorv1alpha1.ExternalSecrets + Scheme *runtime.Scheme + ctx context.Context + eventRecorder record.EventRecorder + log logr.Logger + now *common.Now + esc *operatorv1alpha1.ExternalSecretsConfig } // +kubebuilder:rbac:groups=operator.openshift.io,resources=externalsecretsmanagers,verbs=get;list;watch;create;update;patch;delete @@ -90,8 +90,8 @@ func NewClient(m manager.Manager) operatorclient.CtrlClient { func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { statusUpdatePredicate := predicate.Funcs{ UpdateFunc: func(e event.UpdateEvent) bool { - oldObj := e.ObjectOld.(*operatorv1alpha1.ExternalSecrets) - newObj := e.ObjectNew.(*operatorv1alpha1.ExternalSecrets) + oldObj := e.ObjectOld.(*operatorv1alpha1.ExternalSecretsConfig) + newObj := e.ObjectNew.(*operatorv1alpha1.ExternalSecretsConfig) return !reflect.DeepEqual(oldObj.Status, newObj.Status) }, } @@ -99,7 +99,7 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { return ctrl.NewControllerManagedBy(mgr). For(&operatorv1alpha1.ExternalSecretsManager{}). Named(ControllerName). - Watches(&operatorv1alpha1.ExternalSecrets{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(statusUpdatePredicate)). + Watches(&operatorv1alpha1.ExternalSecretsConfig{}, &handler.EnqueueRequestForObject{}, builder.WithPredicates(statusUpdatePredicate)). 
Complete(r) } @@ -107,21 +107,21 @@ func (r *Reconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { r.log.V(1).Info("reconciling", "request", req) - // Fetch the externalsecretsmanager.openshift.operator.io CR + // Fetch the externalsecretsmanagers.operator.openshift.io CR esm := &operatorv1alpha1.ExternalSecretsManager{} key := types.NamespacedName{ Name: common.ExternalSecretsManagerObjectName, } if err := r.Get(ctx, key, esm); err != nil { r.now.Do(func() { - r.eventRecorder.Eventf(esm, corev1.EventTypeWarning, "Read", "failed to fetch externalsecretsmanager.openshift.operator.io %q", key) + r.eventRecorder.Eventf(esm, corev1.EventTypeWarning, "Read", "failed to fetch externalsecretsmanagers.operator.openshift.io %q", key) }) - return ctrl.Result{RequeueAfter: common.DefaultRequeueTime}, fmt.Errorf("failed to fetch externalsecretsmanager.openshift.operator.io %q during reconciliation: %w", key, err) + return ctrl.Result{RequeueAfter: common.DefaultRequeueTime}, fmt.Errorf("failed to fetch externalsecretsmanagers.operator.openshift.io %q during reconciliation: %w", key, err) } r.now.Reset() if !esm.DeletionTimestamp.IsZero() { - r.log.V(1).Info("externalsecretsmanager.openshift.operator.io is marked for deletion", "key", key) + r.log.V(1).Info("externalsecretsmanagers.operator.openshift.io is marked for deletion", "key", key) if err := common.RemoveFinalizer(ctx, esm, r.CtrlClient, finalizer); err != nil { return ctrl.Result{}, err @@ -131,24 +131,24 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu return ctrl.Result{}, nil } - // Set finalizers on the externalsecretsmanager.openshift.operator.io resource + // Set finalizers on the externalsecretsmanagers.operator.openshift.io resource if err := common.AddFinalizer(ctx, esm, r.CtrlClient, finalizer); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to update %q externalsecretsmanager.openshift.operator.io with finalizers: %w", key, err) + return ctrl.Result{}, fmt.Errorf("failed to update %q externalsecretsmanagers.operator.openshift.io with finalizers: %w", key, err) } - // Fetch the externalsecrets.openshift.operator.io CR - r.externalSecrets = new(operatorv1alpha1.ExternalSecrets) + // Fetch the externalsecretsconfigs.operator.openshift.io CR + r.esc = new(operatorv1alpha1.ExternalSecretsConfig) key = types.NamespacedName{ - Name: common.ExternalSecretsObjectName, + Name: common.ExternalSecretsConfigObjectName, } - if err := r.Get(ctx, key, r.externalSecrets); err != nil { + if err := r.Get(ctx, key, r.esc); err != nil { if errors.IsNotFound(err) { // NotFound errors, would mean the object hasn't been created yet and // not required to reconcile yet. 
- r.log.V(1).Info("externalsecrets.openshift.operator.io object not found, skipping reconciliation", "key", key) + r.log.V(1).Info("externalsecretsconfigs.operator.openshift.io object not found, skipping reconciliation", "key", key) return ctrl.Result{}, nil } - return ctrl.Result{}, fmt.Errorf("failed to fetch externalsecrets.openshift.operator.io %q during reconciliation: %w", key, err) + return ctrl.Result{}, fmt.Errorf("failed to fetch externalsecretsconfigs.operator.openshift.io %q during reconciliation: %w", key, err) } return r.processReconcileRequest(esm) @@ -160,8 +160,8 @@ func (r *Reconciler) processReconcileRequest(esm *operatorv1alpha1.ExternalSecre if esm.Status.ControllerStatuses == nil { esm.Status.ControllerStatuses = make([]operatorv1alpha1.ControllerStatus, 0) } - if r.externalSecrets != nil && len(r.externalSecrets.Status.Conditions) > 0 { - for _, esCond := range r.externalSecrets.Status.Conditions { + if r.esc != nil && len(r.esc.Status.Conditions) > 0 { + for _, esCond := range r.esc.Status.Conditions { if r.updateStatusCondition(esm, externalSecretsControllerId, esCond) { statusUpdated = true } @@ -170,7 +170,7 @@ func (r *Reconciler) processReconcileRequest(esm *operatorv1alpha1.ExternalSecre if statusUpdated { if err := r.updateStatus(r.ctx, esm); err != nil { - return ctrl.Result{}, fmt.Errorf("failed to update externalsecretsmanager.openshift.operator.io status: %w", err) + return ctrl.Result{}, fmt.Errorf("failed to update externalsecretsmanagers.operator.openshift.io status: %w", err) } } @@ -223,19 +223,19 @@ func (r *Reconciler) updateStatusCondition(esm *operatorv1alpha1.ExternalSecrets return condUpdated } -// updateStatus is for updating the status subresource of externalsecretsmanager.openshift.operator.io. +// updateStatus is for updating the status subresource of externalsecretsmanagers.operator.openshift.io. 
func (r *Reconciler) updateStatus(ctx context.Context, changed *operatorv1alpha1.ExternalSecretsManager) error { namespacedName := types.NamespacedName{Name: changed.Name, Namespace: changed.Namespace} if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - r.log.V(4).Info("updating externalsecretsmanager.openshift.operator.io status", "request", namespacedName) + r.log.V(4).Info("updating externalsecretsmanagers.operator.openshift.io status", "request", namespacedName) current := &operatorv1alpha1.ExternalSecretsManager{} if err := r.Get(ctx, namespacedName, current); err != nil { - return fmt.Errorf("failed to fetch externalsecretsmanager.openshift.operator.io %q for status update: %w", namespacedName, err) + return fmt.Errorf("failed to fetch externalsecretsmanagers.operator.openshift.io %q for status update: %w", namespacedName, err) } changed.Status.DeepCopyInto(&current.Status) if err := r.StatusUpdate(ctx, current); err != nil { - return fmt.Errorf("failed to update externalsecretsmanager.openshift.operator.io %q status: %w", namespacedName, err) + return fmt.Errorf("failed to update externalsecretsmanagers.operator.openshift.io %q status: %w", namespacedName, err) } return nil diff --git a/pkg/controller/external_secrets_manager/controller_test.go b/pkg/controller/external_secrets_manager/controller_test.go index 84e328c3..aeaca3eb 100644 --- a/pkg/controller/external_secrets_manager/controller_test.go +++ b/pkg/controller/external_secrets_manager/controller_test.go @@ -68,13 +68,13 @@ func TestReconcile(t *testing.T) { }) m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { switch o := obj.(type) { - case *operatorv1alpha1.ExternalSecrets: - es := &operatorv1alpha1.ExternalSecrets{ + case *operatorv1alpha1.ExternalSecretsConfig: + esc := &operatorv1alpha1.ExternalSecretsConfig{ ObjectMeta: metav1.ObjectMeta{ - Name: common.ExternalSecretsObjectName, + Name: common.ExternalSecretsConfigObjectName, }, - Spec: operatorv1alpha1.ExternalSecretsSpec{}, - Status: operatorv1alpha1.ExternalSecretsStatus{ + Spec: operatorv1alpha1.ExternalSecretsConfigSpec{}, + Status: operatorv1alpha1.ExternalSecretsConfigStatus{ ConditionalStatus: operatorv1alpha1.ConditionalStatus{ Conditions: []metav1.Condition{ { @@ -91,7 +91,7 @@ func TestReconcile(t *testing.T) { }, }, } - es.DeepCopyInto(o) + esc.DeepCopyInto(o) case *operatorv1alpha1.ExternalSecretsManager: esmObj := &operatorv1alpha1.ExternalSecretsManager{ ObjectMeta: metav1.ObjectMeta{ @@ -128,16 +128,16 @@ func TestReconcile(t *testing.T) { m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { switch obj.(type) { case *operatorv1alpha1.ExternalSecretsManager: - return errors.NewNotFound(operatorv1alpha1.Resource("externalsecretsmanager"), ns.Name) + return errors.NewNotFound(operatorv1alpha1.Resource("externalsecretsmanagers"), ns.Name) } return nil }) }, expectedStatusCondition: []operatorv1alpha1.ControllerStatus{}, - wantErr: `failed to fetch externalsecretsmanager.openshift.operator.io "/cluster" during reconciliation: externalsecretsmanager.operator.openshift.io "cluster" not found`, + wantErr: `failed to fetch externalsecretsmanagers.operator.openshift.io "/cluster" during reconciliation: externalsecretsmanagers.operator.openshift.io "cluster" not found`, }, { - name: "es object not found", + name: "externalsecretsconfig object not found", preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) { m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj
client.Object) error { switch o := obj.(type) { @@ -149,8 +149,8 @@ func TestReconcile(t *testing.T) { Spec: operatorv1alpha1.ExternalSecretsManagerSpec{}, } esmObj.DeepCopyInto(o) - case *operatorv1alpha1.ExternalSecrets: - return errors.NewNotFound(operatorv1alpha1.Resource("externalsecrets"), ns.Name) + case *operatorv1alpha1.ExternalSecretsConfig: + return errors.NewNotFound(operatorv1alpha1.Resource("externalsecretsconfigs"), ns.Name) } return nil }) @@ -158,7 +158,7 @@ func TestReconcile(t *testing.T) { expectedStatusCondition: []operatorv1alpha1.ControllerStatus{}, }, { - name: "es fetch fails", + name: "externalsecretsconfig fetch fails", preReq: func(r *Reconciler, m *fakes.FakeCtrlClient) { m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { switch o := obj.(type) { @@ -170,14 +170,14 @@ func TestReconcile(t *testing.T) { Spec: operatorv1alpha1.ExternalSecretsManagerSpec{}, } esmObj.DeepCopyInto(o) - case *operatorv1alpha1.ExternalSecrets: - return errors.NewServerTimeout(operatorv1alpha1.Resource("externalsecrets"), "Get", int(5)) + case *operatorv1alpha1.ExternalSecretsConfig: + return errors.NewServerTimeout(operatorv1alpha1.Resource("externalsecretsconfig"), "Get", 5) } return nil }) }, expectedStatusCondition: []operatorv1alpha1.ControllerStatus{}, - wantErr: `failed to fetch externalsecrets.openshift.operator.io "/cluster" during reconciliation: The Get operation against externalsecrets.operator.openshift.io could not be completed at this time, please try again.`, + wantErr: `failed to fetch externalsecretsconfigs.operator.openshift.io "/cluster" during reconciliation: The Get operation against externalsecretsconfig.operator.openshift.io could not be completed at this time, please try again.`, }, { name: "esm reconciliation successful with new conditions", @@ -198,13 +198,13 @@ func TestReconcile(t *testing.T) { }) m.GetCalls(func(ctx context.Context, ns types.NamespacedName, obj client.Object) error { switch o := obj.(type) { - case *operatorv1alpha1.ExternalSecrets: - es := &operatorv1alpha1.ExternalSecrets{ + case *operatorv1alpha1.ExternalSecretsConfig: + esc := &operatorv1alpha1.ExternalSecretsConfig{ ObjectMeta: metav1.ObjectMeta{ - Name: common.ExternalSecretsObjectName, + Name: common.ExternalSecretsConfigObjectName, }, - Spec: operatorv1alpha1.ExternalSecretsSpec{}, - Status: operatorv1alpha1.ExternalSecretsStatus{ + Spec: operatorv1alpha1.ExternalSecretsConfigSpec{}, + Status: operatorv1alpha1.ExternalSecretsConfigStatus{ ConditionalStatus: operatorv1alpha1.ConditionalStatus{ Conditions: []metav1.Condition{ { @@ -221,7 +221,7 @@ func TestReconcile(t *testing.T) { }, }, } - es.DeepCopyInto(o) + esc.DeepCopyInto(o) case *operatorv1alpha1.ExternalSecretsManager: esmObj := &operatorv1alpha1.ExternalSecretsManager{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/controller/external_secrets_manager/externalsecretsmanager.go b/pkg/controller/external_secrets_manager/externalsecretsmanager.go index 0b5cf5a7..a944d050 100644 --- a/pkg/controller/external_secrets_manager/externalsecretsmanager.go +++ b/pkg/controller/external_secrets_manager/externalsecretsmanager.go @@ -12,7 +12,7 @@ import ( "github.com/openshift/external-secrets-operator/pkg/controller/common" ) -// CreateDefaultESMResource is for creating the default externalsecretsmanager.openshift.operator.io resource, +// CreateDefaultESMResource is for creating the default externalsecretsmanagers.operator.openshift.io resource, // which will be updated by the user with required 
configurations. Controller creates and manages the resource. func CreateDefaultESMResource(ctx context.Context, client client.Client) error { esm := &operatorv1alpha1.ExternalSecretsManager{ diff --git a/pkg/operator/setup_manager.go b/pkg/operator/setup_manager.go index 4e8e326b..07eeb368 100644 --- a/pkg/operator/setup_manager.go +++ b/pkg/operator/setup_manager.go @@ -20,18 +20,18 @@ func StartControllers(ctx context.Context, mgr ctrl.Manager) error { return err } - externalsecrets, err := escontroller.New(ctx, mgr) + externalSecretsConfig, err := escontroller.New(ctx, mgr) if err != nil { logger.Error(err, "failed to create controller", "controller", escontroller.ControllerName) return err } - if err = externalsecrets.SetupWithManager(mgr); err != nil { + if err = externalSecretsConfig.SetupWithManager(mgr); err != nil { logger.Error(err, "failed to set up controller with manager", "controller", escontroller.ControllerName) return err } - if externalsecrets.IsCertManagerInstalled() { + if externalSecretsConfig.IsCertManagerInstalled() { crdAnnotator, err := crdannotator.New(mgr) if err != nil { logger.Error(err, "failed to create crd annotator controller", "controller", crdannotator.ControllerName) @@ -50,7 +50,7 @@ func StartControllers(ctx context.Context, mgr ctrl.Manager) error { return err } if err = esmcontroller.CreateDefaultESMResource(ctx, uncachedClient); err != nil { - logger.Error(err, "failed to create default externalsecretsmanager.openshift.operator.io resource") + logger.Error(err, "failed to create default externalsecretsmanagers.operator.openshift.io resource") return err } diff --git a/test/e2e/e2e_test.go b/test/e2e/e2e_test.go index c2eb3546..e90732d7 100644 --- a/test/e2e/e2e_test.go +++ b/test/e2e/e2e_test.go @@ -78,12 +78,12 @@ var _ = Describe("External Secrets Operator End-to-End test scenarios", Ordered, "external-secrets-operator-controller-manager-", })).To(Succeed()) - By("Creating the externalsecrets.openshift.operator.io/cluster CR") + By("Creating the externalsecretsconfig.operator.openshift.io/cluster CR") loader.CreateFromFile(testassets.ReadFile, externalSecrets, operatorNamespace) }) AfterAll(func() { - By("Deleting the externalsecrets.openshift.operator.io/cluster CR") + By("Deleting the externalsecretsconfig.operator.openshift.io/cluster CR") loader.DeleteFromFile(testassets.ReadFile, externalSecrets, operatorNamespace) err := utils.DeleteAWSSecret(ctx, clientset, awsSecretName, awsSecretRegionName) diff --git a/test/e2e/testdata/external_secret.yaml b/test/e2e/testdata/external_secret.yaml index 9f849fa2..b34858d8 100644 --- a/test/e2e/testdata/external_secret.yaml +++ b/test/e2e/testdata/external_secret.yaml @@ -1,5 +1,5 @@ apiVersion: operator.openshift.io/v1alpha1 -kind: ExternalSecrets +kind: ExternalSecretsConfig metadata: labels: app.kubernetes.io/name: cluster From 70868e24144f0b8a45f95da9e42cbe4b5c45ff24 Mon Sep 17 00:00:00 2001 From: Bharath B Date: Mon, 29 Sep 2025 14:55:39 +0530 Subject: [PATCH 4/7] ESO-101: incorporate AI code review suggestions --- README.md | 2 +- api/v1alpha1/external_secrets_config_types.go | 4 +- .../external_secrets_manager_types.go | 26 +---- api/v1alpha1/meta.go | 17 --- .../externalsecretsmanager.testsuite.yaml | 6 -- api/v1alpha1/zz_generated.deepcopy.go | 20 ---- ...ecrets-operator.clusterserviceversion.yaml | 10 +- ....openshift.io_externalsecretsmanagers.yaml | 30 ------ ....openshift.io_externalsecretsmanagers.yaml | 30 ------ ...ecrets-operator.clusterserviceversion.yaml | 8 +- docs/api_reference.md | 
22 ---- pkg/controller/client/client.go | 3 +- pkg/controller/common/utils.go | 14 +-- pkg/controller/crd_annotator/controller.go | 2 +- .../external_secrets/certificate.go | 10 +- .../external_secrets/certificate_test.go | 2 +- pkg/controller/external_secrets/controller.go | 19 ++-- .../external_secrets/deployments.go | 12 +-- .../install_external_secrets.go | 22 +++- pkg/controller/external_secrets/rbacs.go | 25 +---- pkg/controller/external_secrets/secret.go | 8 +- .../external_secrets/serviceaccounts.go | 9 +- pkg/controller/external_secrets/services.go | 8 +- pkg/controller/external_secrets/suite_test.go | 100 ------------------ pkg/controller/external_secrets/utils.go | 5 +- .../external_secrets/validatingwebhook.go | 7 +- .../external_secrets_manager/controller.go | 5 +- test/apis/README.md | 2 +- 28 files changed, 75 insertions(+), 353 deletions(-) delete mode 100644 pkg/controller/external_secrets/suite_test.go diff --git a/README.md b/README.md index 43bdb20f..f5781495 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ to install application. The operator has three controllers to achieve the same: - `external_secrets` controller: This is responsible for * reconciling the `externalsecretsconfigs.operator.openshift.io` resource. * installing and managing the `external-secrets` application based on the user defined configurations in `externalsecretsconfigs.operator.openshift.io` resource. - * reconciling the `externalsecretsmanagers.operator.openshift.io` resource for the global configurations and updates the `external-scerets` deployment accordingly. + * reconciling the `externalsecretsmanagers.operator.openshift.io` resource for the global configurations and updates the `external-secrets` deployment accordingly. - `crd_annotator` controller: * This is responsible for adding `cert-manager.io/inject-ca-from` annotation in the `external-secrets` provided CRDs. * This is an optional controller, which will be activated only when [`cert-manager`](https://cert-manager.io/) is installed. diff --git a/api/v1alpha1/external_secrets_config_types.go b/api/v1alpha1/external_secrets_config_types.go index e8f78335..930b1c0d 100644 --- a/api/v1alpha1/external_secrets_config_types.go +++ b/api/v1alpha1/external_secrets_config_types.go @@ -72,7 +72,7 @@ type ExternalSecretsConfigSpec struct { // ExternalSecretsConfigStatus is the most recently observed status of the ExternalSecretsConfig. type ExternalSecretsConfigStatus struct { // conditions holds information of the current state of the external-secrets deployment. - ConditionalStatus `json:",inline,omitempty"` + ConditionalStatus `json:",inline"` // externalSecretsImage is the name of the image and the tag used for deploying external-secrets. ExternalSecretsImage string `json:"externalSecretsImage,omitempty"` @@ -95,7 +95,7 @@ type ApplicationConfig struct { WebhookConfig *WebhookConfig `json:"webhookConfig,omitempty"` // +kubebuilder:validation:Optional - CommonConfigs `json:",inline,omitempty"` + CommonConfigs `json:",inline"` } // ControllerConfig is for specifying the configurations for the controller to use while installing the `external-secrets` operand and the plugins. 
diff --git a/api/v1alpha1/external_secrets_manager_types.go b/api/v1alpha1/external_secrets_manager_types.go index 8c036ed3..b9c423c5 100644 --- a/api/v1alpha1/external_secrets_manager_types.go +++ b/api/v1alpha1/external_secrets_manager_types.go @@ -57,14 +57,6 @@ type ExternalSecretsManagerSpec struct { // globalConfig is for configuring the behavior of deployments that are managed by external secrets-operator. // +kubebuilder:validation:Optional GlobalConfig *GlobalConfig `json:"globalConfig,omitempty"` - - // optionalFeatures is for enabling the optional operator features. - // +patchMergeKey=name - // +patchStrategy=merge - // +listType=map - // +listMapKey=name - // +kubebuilder:validation:Optional - OptionalFeatures []Feature `json:"optionalFeatures,omitempty"` } // GlobalConfig is for configuring the external-secrets-operator behavior. @@ -77,23 +69,7 @@ type GlobalConfig struct { // +kubebuilder:validation:Optional Labels map[string]string `json:"labels,omitempty"` - CommonConfigs `json:",inline,omitempty"` -} - -// Feature is for enabling the optional features. -type Feature struct { - // name of the optional feature. There are no optional features currently supported. - // +kubebuilder:validation:Enum:="" - // +kubebuilder:validation:Required - Name string `json:"name"` - - // mode indicates the feature state. - // Use Enabled or Disabled to indicate the preference. - // Enabled: Enables the optional feature and creates resources if required. - // Disabled: Disables the optional feature, but will not remove any resources created. - // +kubebuilder:validation:Enum:=Enabled;Disabled - // +kubebuilder:validation:Required - Mode Mode `json:"mode"` + CommonConfigs `json:",inline"` } // ExternalSecretsManagerStatus is the most recently observed status of the ExternalSecretsManager. diff --git a/api/v1alpha1/meta.go b/api/v1alpha1/meta.go index 278e38ad..68a273d9 100644 --- a/api/v1alpha1/meta.go +++ b/api/v1alpha1/meta.go @@ -121,21 +121,4 @@ const ( // Disabled indicates the optional configuration is disabled. Disabled Mode = "Disabled" - - // DisabledAndCleanup indicates the optional configuration is disabled and created resources are automatically removed. - DisabledAndCleanup Mode = "DisabledAndCleanup" -) - -// PurgePolicy defines the policy for purging default resources. -type PurgePolicy string - -const ( - // PurgeAll indicates to purge all the created resources. - PurgeAll PurgePolicy = "PurgeAll" - - // PurgeNone indicates to purge none of the created resources. - PurgeNone PurgePolicy = "PurgeNone" - - // PurgeExceptSecrets indicates to purge all the created resources except the Secret resource. 
- PurgeExceptSecrets PurgePolicy = "PurgeExceptSecrets" ) diff --git a/api/v1alpha1/tests/externalsecretsmanager.operator.openshift.io/externalsecretsmanager.testsuite.yaml b/api/v1alpha1/tests/externalsecretsmanager.operator.openshift.io/externalsecretsmanager.testsuite.yaml index 1e6715f9..2f88c646 100644 --- a/api/v1alpha1/tests/externalsecretsmanager.operator.openshift.io/externalsecretsmanager.testsuite.yaml +++ b/api/v1alpha1/tests/externalsecretsmanager.operator.openshift.io/externalsecretsmanager.testsuite.yaml @@ -69,9 +69,6 @@ tests: httpProxy: "http://proxy.example.com:8080" httpsProxy: "https://proxy.example.com:8443" noProxy: "localhost,127.0.0.1,.local" - optionalFeatures: - - name: "" - mode: Disabled expected: | apiVersion: operator.openshift.io/v1alpha1 kind: ExternalSecretsManager @@ -92,9 +89,6 @@ tests: httpProxy: "http://proxy.example.com:8080" httpsProxy: "https://proxy.example.com:8443" noProxy: "localhost,127.0.0.1,.local" - optionalFeatures: - - name: "" - mode: Disabled - name: Should fail to create with invalid singleton name resourceName: invalid-name initial: | diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 223ccb66..6496238a 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -401,11 +401,6 @@ func (in *ExternalSecretsManagerSpec) DeepCopyInto(out *ExternalSecretsManagerSp *out = new(GlobalConfig) (*in).DeepCopyInto(*out) } - if in.OptionalFeatures != nil { - in, out := &in.OptionalFeatures, &out.OptionalFeatures - *out = make([]Feature, len(*in)) - copy(*out, *in) - } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalSecretsManagerSpec. @@ -441,21 +436,6 @@ func (in *ExternalSecretsManagerStatus) DeepCopy() *ExternalSecretsManagerStatus return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Feature) DeepCopyInto(out *Feature) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Feature. -func (in *Feature) DeepCopy() *Feature { - if in == nil { - return nil - } - out := new(Feature) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GlobalConfig) DeepCopyInto(out *GlobalConfig) { *out = *in diff --git a/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml b/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml index 871cc306..4140d32b 100644 --- a/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml +++ b/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml @@ -220,7 +220,7 @@ metadata: categories: Security console.openshift.io/disable-operand-delete: "true" containerImage: openshift.io/external-secrets-operator:latest - createdAt: "2025-09-26T04:57:30Z" + createdAt: "2025-09-26T11:41:31Z" features.operators.openshift.io/cnf: "false" features.operators.openshift.io/cni: "false" features.operators.openshift.io/csi: "false" @@ -344,10 +344,10 @@ spec: version: v1alpha1 description: External Secrets Operator for Red Hat OpenShift deploys and manages `external-secrets` application in OpenShift clusters. 
`external-secrets` provides - an uniformed interface to fetch secrets stored in external providers like AWS - Secrets Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM - Cloud Secrets Manager to name a few, stores them as secrets in OpenShift. It provides - APIs to define authentication and the details of the secret to fetch. + an uniform interface to fetch secrets stored in external providers like AWS Secrets + Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM Cloud Secrets + Manager to name a few, stores them as secrets in OpenShift. It provides APIs to + define authentication and the details of the secret to fetch. displayName: External Secrets Operator for Red Hat OpenShift icon: - base64data: iVBORw0KGgoAAAANSUhEUgAAAVQAAAC1CAYAAAAA/QcmAAARRElEQVR4nO3dTWgjaWLG8cfuj2n3eKbkpqGHkNmqueSwJJH6kLlkEtVcwkII1pLLHhYkJ7AJ5OC6DCQnqZlDAlmwOqfksLgMCSTsBMuBXSZLQGXYHLJM4nKyl82lS9tZyGZMS/J0tz1utyuHXgvbbX2/Ukny/weCGUuqel1tPXq/ay6O41gAgKHNJ10AAJgVBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgYqxehqFOoqiv13/huqrPzak+N6cvXFfPPU/HQTCyMgKDmovjOE66EJhtcaOhg1JJR76vuNmUJM1Zlm4WCrrleZp3nEvfdxqmp++5aM6ydCOX081cTjdyuVEVH+gZgYqR289k9HJ3t+3zN/N53fI8XctkWj/7slzWQanUNkwvmrdt3cjlOgY0MGoEKkbqoFTS4YMHPb123rY17zg6iSKd1GoDn/N6Nqs3CgXdLBQGPgYwCAIVI3MSRWq+915i55+37Va3wlwqlVg5cHUwKIWR+dL3Ez3/Sa2mwwcP1HScV90HjUai5cHso4aKkWk6zlBNd9PmLEtveJ4WSqWki4IZRaBiJF6Gofbv30+6GJeat20tVirnBsEAE2jyYyQOy+Wki9DWSa2m/fv3dZRwlwRmDzVUjEQjlep5ylOS3qpWdd11ky4GZgQ1VBj3olKZijCVpGdMrYJBBCqMO6pUki5Cz05qNb0Mw6SLgRlBoMK4F1MUqNJ0fQFgshGoMOrsev1pwUYrMIVAhVHTWNujyQ9TGOWHMXGjocbSUtLFGMjbOzvMS8XQqKHCmGnrOz2LZj9MIFBhzLPvfjfpIgzsmGY/DKDJDyOiKNLtr35VNw4Oki7KQOZtW1YfdxIALkMNFUb4vq+DmzeTLsbATmq1vm7NAlyGQIURvu/r46OjpIsxFEb7MSwCFQM7iSIdB4H+ZW1NtVpN3zk40E/u3k26WAN7wcAUhnQ96QJg8r0MQx0HgY5/ccfS4+3tc88fvvtu67+/trenf71zR7/05Mm4izk0RvoxLAalcKmXYajDcrmnjU4+WljQd84MRlnS1IbqEh8HDIEmP845iSJ94bqv9gvd2OhpGelPLozsNyX95pMnemxZIyrl6FBLxTAIVLS8qFS0n8m81qQfRFNSutnUD+7cGb5gY0Q/KoZBoELSq5rZ069/faCNTX67w3LTbzx5om/dvq2jW7eGKd7YPPv006SLgClGoELScBstf/PatY7Pf/L8ud4/PNSPpmAGwOd7e0kXAVOMQIWOfH+ou5O+s7enD7q85qd6NQPgW7dva3+C+1bv/OxnCpmPigERqDCy5d7fWpZ6iclPnj9XutnU31iWXiwsDH1e0/7v3j0F9KNiQAQqjKwQervZ1D/02KRvSvqzZlO/cnAwccH6b8+e6cff/77iRiPpomAKEagYqrl/1vt7e/r7Pkb1zwbrRwsLarz1lpFyDOr49m19vLene599pqbj6KBUIljRFyb2Q/W5OaPH+8GdO/rGgJP6P5Dk3bun39rfH/vOVacLFD6Q9E+/+NmcZel2uayb3B0VPSBQoS9c18jc07OGCdVTvyvpj999V7/x+ee6eXhopmCX2LcsfbPZ1A/P/Oxiya9ns3rT9zXvOCMrB6YfTX6M5NYfv/PkiX58546+MsQxvifp9x4/1juHh8pK+vbSkv7jnXeMzRL437t39dHCgpwLYSpJe/funfv/4+1t7WcyU31XAoweNVToZRhq//79kRz7xcKC/mRuTp88f2782B9I+oqkD2xbbx4f69clXbvefr+f2smJHs/P63u1mn6oV3247fynbeuX2/Qtv7m+ThcALkWgQtJomv1n/ejuXX1rb08/HdkZzOrWWUGo4jI0+SFJWiiVRnr89/f29Nnt2/rzHuerJqnbIgVJerayoiPfH3VRMGWooaLlaS6nF1tbIz/PvmXp46Ojc1v+TZJP797V+z0uQeX20ziLQEVL3GhoP5MxNi+1m6dLS/q7kxP9RbPZsT9znP5wYUF/2UfQc3M/nEWTHy1zqZQWKxXNjWmt/WK9rj9qNvXfCwv65N49/dpYztpev2EqvVoUcTDi7hJMD2qoeM2R7+vZykoi5366tKR/vnZNf7W3p/8a0zktSd++e1e/P+BOU3OWpRQrqiACFW0kGaqnni4taXdxUX/9+HHXaU6DsCT9qWXpD46Ohl6Vxag/JAIVHRz5vp573kCbTo/C06UlRYuL+vejI/3jz3+uptR3LfYDvdoQ+2uLi/rVx4+Nle1mPq83GfW/8ghUdPQyDPWF605MqLbzP7bd8fnU/r4W6/WRnf9aOq232Uf1yiNQ0dVJFOlpLqeXu7tJF2WiccdUMMqPruYdR28FgW7m80kXBZhoBCp6MpdK6U3f1+Lm5timVU0TrgkkAhV9upHLyYoi3VheTrooE4XVUpAIVAzgdAHA4uam5rsMBl0VBCokAhVDuJHL6e0w1K1i8co3eW+4btJFwARglB9GnESRDkolHW1sJF2UsWOlFE5RQ4UR846jN31f1qNHV242wBuel3QRMCGooWIkrkqNdd629XYYai6VSroomAAEKkbqJIr0pe/ry3J54l
dbDYL9UHEWgYqxOfJ9HZbLM7Piig1RcBGBirE7iSIdlst6UamMbTNr0whTXIZARaJeVCo6qlT0olKZii6BedvWYqVCMx+XIlAxMV6Gob70fR0HwcR1C8xZlt7wvJHfzBDTjUDFRDqJIh0HgV4EgY6DILGugXnb1s1CQbc8j5F8dEWgYiqcRJFehqGOw/BVDTYMR9ZFcC2d1nXX1RuFAk179IVAxVQ7DoJXYRtFihsNvTyzyfPx9nbb911Lp1s1zuuuq/lUStcyGV1nCSmGQKACgCEsPQUAQwhUADCEQAUAQwhUADCEQAUAQwhUADCEQAUAQwhUADCEQAUAQwhUADDketIFAM4Kw/DcIwiCpIsE9IxAHSFTYeA4jhzHMXKsSRNFkXzfVxAECsNQzQF3kIqiSFEUtX1+kGsYhqEaHW4PnclklJrALf3CMJTjOBNZtpkXY2QkGXtYlhUvLy/Hm5ubSf9aRhWLxY6/t6njFIvFvsuWzWY7HrNarfZ9TNPq9Xq8ubkZF4vFOJvNxpZlTUzZriJqqFOi2Wxqa2tLW1tbSqfT8n1fGfbqvLLCMFQul1NtSu/JNasYlJpCu7u7un//vnzfT7ooSEij0SBMJxCBOsVWVlYIVWCCEKhTzvO8joMxAMaHQJ1yzWZTJe7ECUwEBqUSVCwW2z7XaDRaczG7TSXa2NhQuVxmmgyQMAI1Qb3ULBuNhgqFgra2tjq+rlKpqFAodDxOpVJpzffcPXPf+3Q6LcdxlMvllMvljATz2XNd5LqucrnczM9SOF2YEIahoii69Msxm80qk8m0rkk7F/9WunXznM7t7eVYMCjpeVuzTF3mlvaqXq/Htm0PNM+yXq/HxWKxNT+x28OyrLhYLMb1er3v37ffc9m23XWuZ68mZR7qzs5OnM/nu/57tbse6+vrl5al32OZuKboH1d3hEz+Ya+urvYdGDs7O3E6nR7oQ2fbdryzs9Nz+XZ2dgYKEVPXKOlArVarxn7/fD7/WlkI1OnAoNSU6LcZHoahXNc917TvR61Wk+u6lzbZL/J9X/fv37/y8yJN/f4bGxsdu28wuehDnRL9zDc9DdNB18WfajabrVBttw4+CAKtrKwMdZ5xiKKo770VOq3jv8h1XWWzWW1vb/dZssudhqrrukaOhzFJuoo8y2So6ZXP57se6+wa/0Gb+e0e2Wz20nLV6/We+0sHffSqW5N/FI+LfajVatXo8ZeXl3v+WxrFNUX/qKEmqNto62mtqltT0rKs1ghxqVTq2szP5/NyXVeO47R2e+pUs9re3lalUnltFNrzvJ5qwbZtnzvfxVkGs6JdLfV0JN9xHGUyGQVBoCiKtLGx0fF4Z2d2ZLPZc881Go2O1zCdTjONLglJJ/os05hqSmcHXDrVGC3LarsL0fr6esdzXKylPnr0qGu5bNtue75qtdp10KefP89JqKHGcRxvbm62rtf6+nrH2RI7Oztda/idrt8g78NoEagjNI4PtW3brQ/t6Ye53WNtba1jedfW1jq+/9GjRz2/Np1O9zT1qlv3RK8mJVDjOD53nbrp9kVGoE4XmvxTzLIsVSqVVtOuUql0fO1pc7OdTCYjy7LaNuODIGiNPnc6l/RqEK2XJucsNkv72ch6VjcOv6oI1Cll27Yqlcq51UadVs80m019+OGHQ53z7BSqTn2uy8vLM78KqpsoilSpVFqrpC7DCP7sIVCnjGVZ8jxPnue9VrvrZc7oMHo9/lUOijAM5XleT9OnTE2xwuQgUKdMs9lUoVC4tKk87LxTUyaxdprP5/ueLO95Xl+zEXzfn4o5uRgdAjVBcRy/9rNGoyHHcTqGY6lUunSif6f+z3E6XVgwSRzH6btM/fTvVioVwhTshzppUqmUPM/r+JqNjY1LB5cmpWZ4FW/93O3fDFcDgTqBPM+TZVkdX3PZooBugfro0SPFr6bKDfQ4G5QXJ5qftbW1NfL+3EkShmHHxRf5fF7ValX1el1xHGtnZ0fValVra2uybXuMJcWoEagTqJda6vb29ms1wW5N2lwu19P69F5eM85zTbpOXx7FYlG+78t13VYXwun+p57njWza1Cxc12lEoE6oXmqpFwdZcrlcxxrP7u6uXNdtO42n0Wi0PuTdapjdBnhqtZocx2m7qUsQBMpkMjOxBLXTdLWkumG4eWMyCNQJ1UsttVarvfbB6fae3d1dvffee8rlciqXywqCQOVyWYVCQY7j6OHDh61dpjp9KB3HUT6f73iuZrOplZUVpVKpVo0sl8vJcRx9+OGHMxGmUufJ+aVSqW1tMYqikd1gcWtrS47jyPM8lUoleZ4n13UnbrBw5ox9bdYVoi5LF3vRbdNiy7JeW+JpcrepdjvIx/F07TY1yg2md3Z2Or7Otu14dXU1LhaLcbFYjFdXV3v+N2q3hLRerw90PdvtHAYzqKFOuG47UjWbTZXL5XM/q1QqXbsLerWystK2eZ9KpWha6lWzPp1Ot32+Vqvp4cOHevDggR48eKCHDx8OXTtPpVIdBwaRDAJ1whUKha4jweVy+Vyz0nEcBUFgLFQrlUrbpmkul9P6+rqR80yzi19q48DN9iYPgToFeqmlXuw7zWQyiqJo6FpMNptVFEUd+wkLhYKq1aqxAJ9GruuO/YvFdV2trq6O9ZzojECdAr3UUjc2Nl6rRaZSKQVBoPX19b7nO6bTaVWrVQVB0NOKodPZA8VisedgXV1dnalAKBQK2tzc7PlaW5al5eXloc5ZLpe1trZ2pb/MJslcHF+y/hFGdKtZ9tNkC4Kg6wqkbqO4YRjK932FYfjaPeJt224tz8zlckNN92k0Gq2dli5Ovzp7jtPA7/R79XqNuh1nkBFu3/c7jsKfzoxo994gCF67O0E2mz13DRqNRsd+6E7nOKvTNZfUumOA67oTs6JuFhGoAGAITX4AMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBD/h+fh1Gb7+ZNngAAAABJRU5ErkJggg== diff --git a/bundle/manifests/operator.openshift.io_externalsecretsmanagers.yaml b/bundle/manifests/operator.openshift.io_externalsecretsmanagers.yaml index 61a88270..11a2023c 100644 --- a/bundle/manifests/operator.openshift.io_externalsecretsmanagers.yaml +++ b/bundle/manifests/operator.openshift.io_externalsecretsmanagers.yaml @@ -1155,36 +1155,6 @@ spec: type: array 
x-kubernetes-list-type: atomic type: object - optionalFeatures: - description: optionalFeatures is for enabling the optional operator - features. - items: - description: Feature is for enabling the optional features. - properties: - mode: - description: |- - mode indicates the feature state. - Use Enabled or Disabled to indicate the preference. - Enabled: Enables the optional feature and creates resources if required. - Disabled: Disables the optional feature, but will not remove any resources created. - enum: - - Enabled - - Disabled - type: string - name: - description: name of the optional feature. There are no optional - features currently supported. - enum: - - "" - type: string - required: - - mode - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map type: object status: description: status is the most recently observed status of controllers diff --git a/config/crd/bases/operator.openshift.io_externalsecretsmanagers.yaml b/config/crd/bases/operator.openshift.io_externalsecretsmanagers.yaml index 938dbe53..600b11ce 100644 --- a/config/crd/bases/operator.openshift.io_externalsecretsmanagers.yaml +++ b/config/crd/bases/operator.openshift.io_externalsecretsmanagers.yaml @@ -1155,36 +1155,6 @@ spec: type: array x-kubernetes-list-type: atomic type: object - optionalFeatures: - description: optionalFeatures is for enabling the optional operator - features. - items: - description: Feature is for enabling the optional features. - properties: - mode: - description: |- - mode indicates the feature state. - Use Enabled or Disabled to indicate the preference. - Enabled: Enables the optional feature and creates resources if required. - Disabled: Disables the optional feature, but will not remove any resources created. - enum: - - Enabled - - Disabled - type: string - name: - description: name of the optional feature. There are no optional - features currently supported. - enum: - - "" - type: string - required: - - mode - - name - type: object - type: array - x-kubernetes-list-map-keys: - - name - x-kubernetes-list-type: map type: object status: description: status is the most recently observed status of controllers diff --git a/config/manifests/bases/external-secrets-operator.clusterserviceversion.yaml b/config/manifests/bases/external-secrets-operator.clusterserviceversion.yaml index 85cd9c25..a987d204 100644 --- a/config/manifests/bases/external-secrets-operator.clusterserviceversion.yaml +++ b/config/manifests/bases/external-secrets-operator.clusterserviceversion.yaml @@ -130,10 +130,10 @@ spec: version: v1alpha1 description: External Secrets Operator for Red Hat OpenShift deploys and manages `external-secrets` application in OpenShift clusters. `external-secrets` provides - an uniform interface to fetch secrets stored in external providers like AWS - Secrets Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM - Cloud Secrets Manager to name a few, stores them as secrets in OpenShift. It provides - APIs to define authentication and the details of the secret to fetch. + an uniform interface to fetch secrets stored in external providers like AWS Secrets + Manager, HashiCorp Vault, Google Secrets Manager, Azure Key Vault, IBM Cloud Secrets + Manager to name a few, stores them as secrets in OpenShift. It provides APIs to + define authentication and the details of the secret to fetch. 
displayName: External Secrets Operator for Red Hat OpenShift icon: - base64data: iVBORw0KGgoAAAANSUhEUgAAAVQAAAC1CAYAAAAA/QcmAAARRElEQVR4nO3dTWgjaWLG8cfuj2n3eKbkpqGHkNmqueSwJJH6kLlkEtVcwkII1pLLHhYkJ7AJ5OC6DCQnqZlDAlmwOqfksLgMCSTsBMuBXSZLQGXYHLJM4nKyl82lS9tZyGZMS/J0tz1utyuHXgvbbX2/Ukny/weCGUuqel1tPXq/ay6O41gAgKHNJ10AAJgVBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgAoAhBCoAGEKgYqxehqFOoqiv13/huqrPzak+N6cvXFfPPU/HQTCyMgKDmovjOE66EJhtcaOhg1JJR76vuNmUJM1Zlm4WCrrleZp3nEvfdxqmp++5aM6ydCOX081cTjdyuVEVH+gZgYqR289k9HJ3t+3zN/N53fI8XctkWj/7slzWQanUNkwvmrdt3cjlOgY0MGoEKkbqoFTS4YMHPb123rY17zg6iSKd1GoDn/N6Nqs3CgXdLBQGPgYwCAIVI3MSRWq+915i55+37Va3wlwqlVg5cHUwKIWR+dL3Ez3/Sa2mwwcP1HScV90HjUai5cHso4aKkWk6zlBNd9PmLEtveJ4WSqWki4IZRaBiJF6Gofbv30+6GJeat20tVirnBsEAE2jyYyQOy+Wki9DWSa2m/fv3dZRwlwRmDzVUjEQjlep5ylOS3qpWdd11ky4GZgQ1VBj3olKZijCVpGdMrYJBBCqMO6pUki5Cz05qNb0Mw6SLgRlBoMK4F1MUqNJ0fQFgshGoMOrsev1pwUYrMIVAhVHTWNujyQ9TGOWHMXGjocbSUtLFGMjbOzvMS8XQqKHCmGnrOz2LZj9MIFBhzLPvfjfpIgzsmGY/DKDJDyOiKNLtr35VNw4Oki7KQOZtW1YfdxIALkMNFUb4vq+DmzeTLsbATmq1vm7NAlyGQIURvu/r46OjpIsxFEb7MSwCFQM7iSIdB4H+ZW1NtVpN3zk40E/u3k26WAN7wcAUhnQ96QJg8r0MQx0HgY5/ccfS4+3tc88fvvtu67+/trenf71zR7/05Mm4izk0RvoxLAalcKmXYajDcrmnjU4+WljQd84MRlnS1IbqEh8HDIEmP845iSJ94bqv9gvd2OhpGelPLozsNyX95pMnemxZIyrl6FBLxTAIVLS8qFS0n8m81qQfRFNSutnUD+7cGb5gY0Q/KoZBoELSq5rZ069/faCNTX67w3LTbzx5om/dvq2jW7eGKd7YPPv006SLgClGoELScBstf/PatY7Pf/L8ud4/PNSPpmAGwOd7e0kXAVOMQIWOfH+ou5O+s7enD7q85qd6NQPgW7dva3+C+1bv/OxnCpmPigERqDCy5d7fWpZ6iclPnj9XutnU31iWXiwsDH1e0/7v3j0F9KNiQAQqjKwQervZ1D/02KRvSvqzZlO/cnAwccH6b8+e6cff/77iRiPpomAKEagYqrl/1vt7e/r7Pkb1zwbrRwsLarz1lpFyDOr49m19vLene599pqbj6KBUIljRFyb2Q/W5OaPH+8GdO/rGgJP6P5Dk3bun39rfH/vOVacLFD6Q9E+/+NmcZel2uayb3B0VPSBQoS9c18jc07OGCdVTvyvpj999V7/x+ee6eXhopmCX2LcsfbPZ1A/P/Oxiya9ns3rT9zXvOCMrB6YfTX6M5NYfv/PkiX58546+MsQxvifp9x4/1juHh8pK+vbSkv7jnXeMzRL437t39dHCgpwLYSpJe/funfv/4+1t7WcyU31XAoweNVToZRhq//79kRz7xcKC/mRuTp88f2782B9I+oqkD2xbbx4f69clXbvefr+f2smJHs/P63u1mn6oV3247fynbeuX2/Qtv7m+ThcALkWgQtJomv1n/ejuXX1rb08/HdkZzOrWWUGo4jI0+SFJWiiVRnr89/f29Nnt2/rzHuerJqnbIgVJerayoiPfH3VRMGWooaLlaS6nF1tbIz/PvmXp46Ojc1v+TZJP797V+z0uQeX20ziLQEVL3GhoP5MxNi+1m6dLS/q7kxP9RbPZsT9znP5wYUF/2UfQc3M/nEWTHy1zqZQWKxXNjWmt/WK9rj9qNvXfCwv65N49/dpYztpev2EqvVoUcTDi7hJMD2qoeM2R7+vZykoi5366tKR/vnZNf7W3p/8a0zktSd++e1e/P+BOU3OWpRQrqiACFW0kGaqnni4taXdxUX/9+HHXaU6DsCT9qWXpD46Ohl6Vxag/JAIVHRz5vp573kCbTo/C06UlRYuL+vejI/3jz3+uptR3LfYDvdoQ+2uLi/rVx4+Nle1mPq83GfW/8ghUdPQyDPWF605MqLbzP7bd8fnU/r4W6/WRnf9aOq232Uf1yiNQ0dVJFOlpLqeXu7tJF2WiccdUMMqPruYdR28FgW7m80kXBZhoBCp6MpdK6U3f1+Lm5timVU0TrgkkAhV9upHLyYoi3VheTrooE4XVUpAIVAzgdAHA4uam5rsMBl0VBCokAhVDuJHL6e0w1K1i8co3eW+4btJFwARglB9GnESRDkolHW1sJF2UsWOlFE5RQ4UR846jN31f1qNHV242wBuel3QRMCGooWIkrkqNdd629XYYai6VSroomAAEKkbqJIr0pe/ry3J54ldbDYL9UHEWgYqxOfJ9HZbLM7Piig1RcBGBirE7iSIdlst6UamMbTNr0whTXIZARaJeVCo6qlT0olKZii6BedvWYqVCMx+XIlAxMV6Gob70fR0HwcR1C8xZlt7wvJHfzBDTjUDFRDqJIh0HgV4EgY6DILGugXnb1s1CQbc8j5F8dEWgYiqcRJFehqGOw/BVDTYMR9ZFcC2d1nXX1RuFAk179IVAxVQ7DoJXYRtFihsNvTyzyfPx9nbb911Lp1s1zuuuq/lUStcyGV1nCSmGQKACgCEsPQUAQwhUADCEQAUAQwhUADCEQAUAQwhUADCEQAUAQwhUADCEQAUAQwhUADDketIFAM4Kw/DcIwiCpIsE9IxAHSFTYeA4jhzHMXKsSRNFkXzfVxAECsNQzQF3kIqiSFEUtX1+kGsYhqEaHW4PnclklJrALf3CMJTjOBNZtpkXY2QkGXtYlhUvLy/Hm5ubSf9aRhWLxY6/t6njFIvFvsuWzWY7HrNarfZ9TNPq9Xq8ubkZF4vFOJvNxpZlTUzZriJqqFOi2Wxqa2tLW1tbSqfT8n1fGfbqvLLCMFQul1NtSu/JNasYlJpCu7u7un//vnzfT7ooSEij0SBMJxCBOsVWVlYIVWCCEKhTzvO8joMxAMaHQJ1yzWZTJe7ECUwEBqUSVC
wW2z7XaDRaczG7TSXa2NhQuVxmmgyQMAI1Qb3ULBuNhgqFgra2tjq+rlKpqFAodDxOpVJpzffcPXPf+3Q6LcdxlMvllMvljATz2XNd5LqucrnczM9SOF2YEIahoii69Msxm80qk8m0rkk7F/9WunXznM7t7eVYMCjpeVuzTF3mlvaqXq/Htm0PNM+yXq/HxWKxNT+x28OyrLhYLMb1er3v37ffc9m23XWuZ68mZR7qzs5OnM/nu/57tbse6+vrl5al32OZuKboH1d3hEz+Ya+urvYdGDs7O3E6nR7oQ2fbdryzs9Nz+XZ2dgYKEVPXKOlArVarxn7/fD7/WlkI1OnAoNSU6LcZHoahXNc917TvR61Wk+u6lzbZL/J9X/fv37/y8yJN/f4bGxsdu28wuehDnRL9zDc9DdNB18WfajabrVBttw4+CAKtrKwMdZ5xiKKo770VOq3jv8h1XWWzWW1vb/dZssudhqrrukaOhzFJuoo8y2So6ZXP57se6+wa/0Gb+e0e2Wz20nLV6/We+0sHffSqW5N/FI+LfajVatXo8ZeXl3v+WxrFNUX/qKEmqNto62mtqltT0rKs1ghxqVTq2szP5/NyXVeO47R2e+pUs9re3lalUnltFNrzvJ5qwbZtnzvfxVkGs6JdLfV0JN9xHGUyGQVBoCiKtLGx0fF4Z2d2ZLPZc881Go2O1zCdTjONLglJJ/os05hqSmcHXDrVGC3LarsL0fr6esdzXKylPnr0qGu5bNtue75qtdp10KefP89JqKHGcRxvbm62rtf6+nrH2RI7Oztda/idrt8g78NoEagjNI4PtW3brQ/t6Ye53WNtba1jedfW1jq+/9GjRz2/Np1O9zT1qlv3RK8mJVDjOD53nbrp9kVGoE4XmvxTzLIsVSqVVtOuUql0fO1pc7OdTCYjy7LaNuODIGiNPnc6l/RqEK2XJucsNkv72ch6VjcOv6oI1Cll27Yqlcq51UadVs80m019+OGHQ53z7BSqTn2uy8vLM78KqpsoilSpVFqrpC7DCP7sIVCnjGVZ8jxPnue9VrvrZc7oMHo9/lUOijAM5XleT9OnTE2xwuQgUKdMs9lUoVC4tKk87LxTUyaxdprP5/ueLO95Xl+zEXzfn4o5uRgdAjVBcRy/9rNGoyHHcTqGY6lUunSif6f+z3E6XVgwSRzH6btM/fTvVioVwhTshzppUqmUPM/r+JqNjY1LB5cmpWZ4FW/93O3fDFcDgTqBPM+TZVkdX3PZooBugfro0SPFr6bKDfQ4G5QXJ5qftbW1NfL+3EkShmHHxRf5fF7ValX1el1xHGtnZ0fValVra2uybXuMJcWoEagTqJda6vb29ms1wW5N2lwu19P69F5eM85zTbpOXx7FYlG+78t13VYXwun+p57njWza1Cxc12lEoE6oXmqpFwdZcrlcxxrP7u6uXNdtO42n0Wi0PuTdapjdBnhqtZocx2m7qUsQBMpkMjOxBLXTdLWkumG4eWMyCNQJ1UsttVarvfbB6fae3d1dvffee8rlciqXywqCQOVyWYVCQY7j6OHDh61dpjp9KB3HUT6f73iuZrOplZUVpVKpVo0sl8vJcRx9+OGHMxGmUufJ+aVSqW1tMYqikd1gcWtrS47jyPM8lUoleZ4n13UnbrBw5ox9bdYVoi5LF3vRbdNiy7JeW+JpcrepdjvIx/F07TY1yg2md3Z2Or7Otu14dXU1LhaLcbFYjFdXV3v+N2q3hLRerw90PdvtHAYzqKFOuG47UjWbTZXL5XM/q1QqXbsLerWystK2eZ9KpWha6lWzPp1Ot32+Vqvp4cOHevDggR48eKCHDx8OXTtPpVIdBwaRDAJ1whUKha4jweVy+Vyz0nEcBUFgLFQrlUrbpmkul9P6+rqR80yzi19q48DN9iYPgToFeqmlXuw7zWQyiqJo6FpMNptVFEUd+wkLhYKq1aqxAJ9GruuO/YvFdV2trq6O9ZzojECdAr3UUjc2Nl6rRaZSKQVBoPX19b7nO6bTaVWrVQVB0NOKodPZA8VisedgXV1dnalAKBQK2tzc7PlaW5al5eXloc5ZLpe1trZ2pb/MJslcHF+y/hFGdKtZ9tNkC4Kg6wqkbqO4YRjK932FYfjaPeJt224tz8zlckNN92k0Gq2dli5Ovzp7jtPA7/R79XqNuh1nkBFu3/c7jsKfzoxo994gCF67O0E2mz13DRqNRsd+6E7nOKvTNZfUumOA67oTs6JuFhGoAGAITX4AMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBDCFQAMIRABQBD/h+fh1Gb7+ZNngAAAABJRU5ErkJggg== diff --git a/docs/api_reference.md b/docs/api_reference.md index f576be8f..e139176f 100644 --- a/docs/api_reference.md +++ b/docs/api_reference.md @@ -317,7 +317,6 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `globalConfig` _[GlobalConfig](#globalconfig)_ | globalConfig is for configuring the behavior of deployments that are managed by external secrets-operator. | | Optional: \{\}
| -| `optionalFeatures` _[Feature](#feature) array_ | optionalFeatures is for enabling the optional operator features. | | Optional: \{\}
| #### ExternalSecretsManagerStatus @@ -337,23 +336,6 @@ _Appears in:_ | `lastTransitionTime` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#time-v1-meta)_ | lastTransitionTime is the last time the condition transitioned from one status to another. | | Format: date-time
Type: string
| -#### Feature - - - -Feature is for enabling the optional features. - - - -_Appears in:_ -- [ExternalSecretsManagerSpec](#externalsecretsmanagerspec) - -| Field | Description | Default | Validation | -| --- | --- | --- | --- | -| `name` _string_ | name of the optional feature. There are no optional features currently supported. | | Enum: []
Required: \{\}
| -| `mode` _[Mode](#mode)_ | mode indicates the feature state.
Use Enabled or Disabled to indicate the preference.
Enabled: Enables the optional feature and creates resources if required.
Disabled: Disables the optional feature, but will not remove any resources created. | | Enum: [Enabled Disabled]
Required: \{\}
| - - #### GlobalConfig @@ -387,13 +369,11 @@ Mode indicates the operational state of the optional features. _Appears in:_ - [BitwardenSecretManagerProvider](#bitwardensecretmanagerprovider) - [CertManagerConfig](#certmanagerconfig) -- [Feature](#feature) | Field | Description | | --- | --- | | `Enabled` | Enabled indicates the optional configuration is enabled.
| | `Disabled` | Disabled indicates the optional configuration is disabled.
| -| `DisabledAndCleanup` | DisabledAndCleanup indicates the optional configuration is disabled and created resources are automatically removed.
| #### ObjectReference @@ -450,8 +430,6 @@ _Appears in:_ | `noProxy` _string_ | noProxy is a comma-separated list of hostnames and/or CIDRs and/or IPs for which the proxy should not be used.
This field can have a maximum of 4096 characters. | | MaxLength: 4096
MinLength: 0
Optional: \{\}
| - - #### SecretReference diff --git a/pkg/controller/client/client.go b/pkg/controller/client/client.go index 99c6ed54..33d270cd 100644 --- a/pkg/controller/client/client.go +++ b/pkg/controller/client/client.go @@ -6,7 +6,6 @@ import ( "reflect" "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" @@ -63,7 +62,7 @@ func (c *CtrlClientImpl) Update( func (c *CtrlClientImpl) UpdateWithRetry( ctx context.Context, obj client.Object, opts ...client.UpdateOption, ) error { - key := types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()} + key := client.ObjectKeyFromObject(obj) if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { current := reflect.New(reflect.TypeOf(obj).Elem()).Interface().(client.Object) if err := c.Client.Get(ctx, key, current); err != nil { diff --git a/pkg/controller/common/utils.go b/pkg/controller/common/utils.go index bc9405fa..bc68a4be 100644 --- a/pkg/controller/common/utils.go +++ b/pkg/controller/common/utils.go @@ -15,7 +15,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" @@ -61,6 +60,9 @@ func init() { func UpdateResourceLabels(obj client.Object, labels map[string]string) { l := obj.GetLabels() + if l == nil { + l = make(map[string]string, len(labels)) + } for k, v := range labels { l[k] = v } @@ -402,7 +404,7 @@ func IsInjectCertManagerAnnotationEnabled(esc *operatorv1alpha1.ExternalSecretsC // AddFinalizer adds finalizer to the passed resource object. func AddFinalizer(ctx context.Context, obj client.Object, opClient operatorclient.CtrlClient, finalizer string) error { - namespacedName := types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()} + namespacedName := client.ObjectKeyFromObject(obj) if !controllerutil.ContainsFinalizer(obj, finalizer) { if !controllerutil.AddFinalizer(obj, finalizer) { return fmt.Errorf("failed to create %q object with finalizers added", namespacedName) @@ -435,14 +437,14 @@ func AddFinalizer(ctx context.Context, obj client.Object, opClient operatorclien // RemoveFinalizer removes finalizers added from the passed resource object. 
func RemoveFinalizer(ctx context.Context, obj client.Object, opClient operatorclient.CtrlClient, finalizer string) error { - namespacedName := types.NamespacedName{Name: obj.GetName(), Namespace: obj.GetNamespace()} + namespacedName := client.ObjectKeyFromObject(obj) if controllerutil.ContainsFinalizer(obj, finalizer) { if !controllerutil.RemoveFinalizer(obj, finalizer) { - return fmt.Errorf("failed to update %q externalsecretsconfigs.operator.openshift.io object with finalizers removed", namespacedName) + return fmt.Errorf("failed to remove finalizers on %q", namespacedName) } if err := opClient.UpdateWithRetry(ctx, obj); err != nil { - return fmt.Errorf("failed to remove finalizers on %q externalsecretsconfigs.operator.openshift.io with %w", namespacedName, err) + return fmt.Errorf("update failed to remove finalizers on %q: %w", namespacedName, err) } return nil } @@ -490,7 +492,7 @@ func DeleteObject(ctx context.Context, ctrlClient operatorclient.CtrlClient, obj default: panic(fmt.Sprintf("unsupported object type: %T", obj)) } - exists, err := ctrlClient.Exists(ctx, types.NamespacedName{Name: o.GetName(), Namespace: o.GetNamespace()}, o) + exists, err := ctrlClient.Exists(ctx, client.ObjectKeyFromObject(o), o) if err != nil { return err } diff --git a/pkg/controller/crd_annotator/controller.go b/pkg/controller/crd_annotator/controller.go index 2231651f..2706142f 100644 --- a/pkg/controller/crd_annotator/controller.go +++ b/pkg/controller/crd_annotator/controller.go @@ -302,7 +302,7 @@ func (r *Reconciler) updateCondition(esc *operatorv1alpha1.ExternalSecretsConfig // updateStatus is for updating the status subresource of externalsecretsconfigs.operator.openshift.io. func (r *Reconciler) updateStatus(ctx context.Context, changed *operatorv1alpha1.ExternalSecretsConfig) error { - namespacedName := types.NamespacedName{Name: changed.Name, Namespace: changed.Namespace} + namespacedName := client.ObjectKeyFromObject(changed) if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { r.log.V(4).Info("updating externalsecretsconfigs.operator.openshift.io status", "request", namespacedName) current := &operatorv1alpha1.ExternalSecretsConfig{} diff --git a/pkg/controller/external_secrets/certificate.go b/pkg/controller/external_secrets/certificate.go index a0a96bfd..e90941e7 100644 --- a/pkg/controller/external_secrets/certificate.go +++ b/pkg/controller/external_secrets/certificate.go @@ -53,11 +53,7 @@ func (r *Reconciler) createOrApplyCertificate(esc *operatorv1alpha1.ExternalSecr certificateName := fmt.Sprintf("%s/%s", desired.GetNamespace(), desired.GetName()) r.log.V(4).Info("reconciling certificate resource", "name", certificateName) fetched := &certmanagerv1.Certificate{} - key := types.NamespacedName{ - Name: desired.GetName(), - Namespace: desired.GetNamespace(), - } - exist, err := r.Exists(r.ctx, key, fetched) + exist, err := r.Exists(r.ctx, client.ObjectKeyFromObject(desired), fetched) if err != nil { return common.FromClientError(err, "failed to check %s certificate resource already exists", certificateName) } @@ -103,10 +99,10 @@ func (r *Reconciler) updateCertificateParams(esc *operatorv1alpha1.ExternalSecre certManageConfig = esc.Spec.ControllerConfig.CertProvider.CertManager } if reflect.ValueOf(certManageConfig.IssuerRef).IsZero() { - return fmt.Errorf("certManageConfig is enabled without IssuerRef") + return fmt.Errorf("cert-manager is enabled but issuerRef is not configured") } if certManageConfig.IssuerRef.Name == "" { - return fmt.Errorf("issuerRef.Name not 
present") + return fmt.Errorf("cert-manager.issuerRef.name is not configured") } externalSecretsNamespace := getNamespace(esc) diff --git a/pkg/controller/external_secrets/certificate_test.go b/pkg/controller/external_secrets/certificate_test.go index 5f3fcd0b..2fbfc114 100644 --- a/pkg/controller/external_secrets/certificate_test.go +++ b/pkg/controller/external_secrets/certificate_test.go @@ -86,7 +86,7 @@ func TestCreateOrApplyCertificates(t *testing.T) { esc.Spec.ControllerConfig.CertProvider.CertManager.IssuerRef.Kind = "Issuer" }, recon: false, - wantErr: fmt.Sprintf("failed to update certificate resource for %s/%s deployment: issuerRef.Name not present", commontest.TestExternalSecretsNamespace, testExternalSecretsConfigForCertificate().GetName()), + wantErr: fmt.Sprintf("failed to update certificate resource for %s/%s deployment: cert-manager.issuerRef.name is not configured", commontest.TestExternalSecretsNamespace, testExternalSecretsConfigForCertificate().GetName()), }, { name: "reconciliation of webhook certificate fails while checking if exists", diff --git a/pkg/controller/external_secrets/controller.go b/pkg/controller/external_secrets/controller.go index 68bca661..dfc75064 100644 --- a/pkg/controller/external_secrets/controller.go +++ b/pkg/controller/external_secrets/controller.go @@ -342,20 +342,13 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu } if !esc.DeletionTimestamp.IsZero() { - r.log.V(1).Info("externalsecretsconfigs.operator.openshift.io is marked for deletion", "namespace", req.NamespacedName) + r.log.V(1).Info("externalsecretsconfigs.operator.openshift.io is marked for deletion", "name", req.NamespacedName) - if requeue, err := r.cleanUp(esc); err != nil { + if requeue, err := r.cleanUp(esc, req); err != nil { return ctrl.Result{}, fmt.Errorf("clean up failed for %q externalsecretsconfigs.operator.openshift.io instance deletion: %w", req.NamespacedName, err) } else if requeue { return ctrl.Result{RequeueAfter: common.DefaultRequeueTime}, nil } - - if err := common.RemoveFinalizer(ctx, esc, r.CtrlClient, finalizer); err != nil { - return ctrl.Result{}, err - } - - r.log.V(1).Info("removed finalizer, cleanup complete", "request", req.NamespacedName) - return ctrl.Result{}, nil } // Set finalizers on the externalsecretsconfigs.operator.openshift.io resource @@ -455,8 +448,14 @@ func (r *Reconciler) processReconcileRequest(esc *operatorv1alpha1.ExternalSecre } // cleanUp handles deletion of externalsecretsconfigs.operator.openshift.io gracefully. -func (r *Reconciler) cleanUp(esc *operatorv1alpha1.ExternalSecretsConfig) (bool, error) { +func (r *Reconciler) cleanUp(esc *operatorv1alpha1.ExternalSecretsConfig, req ctrl.Request) (bool, error) { // TODO: For GA, handle cleaning up of resources created for installing external-secrets operand. 
r.eventRecorder.Eventf(esc, corev1.EventTypeWarning, "RemoveDeployment", "%s/%s externalsecretsconfigs.operator.openshift.io marked for deletion, remove reference in deployment and remove all resources created for deployment", esc.GetNamespace(), esc.GetName()) + + if err := common.RemoveFinalizer(r.ctx, esc, r.CtrlClient, finalizer); err != nil { + return true, err + } + r.log.V(1).Info("removed finalizer, cleanup complete", "request", req.NamespacedName) + return false, nil } diff --git a/pkg/controller/external_secrets/deployments.go b/pkg/controller/external_secrets/deployments.go index 80a1a682..32ed82c0 100644 --- a/pkg/controller/external_secrets/deployments.go +++ b/pkg/controller/external_secrets/deployments.go @@ -8,11 +8,11 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" - "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" "k8s.io/kubernetes/pkg/apis/core" corevalidation "k8s.io/kubernetes/pkg/apis/core/validation" "k8s.io/utils/ptr" + "sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-secrets-operator/api/v1alpha1" "github.com/openshift/external-secrets-operator/pkg/controller/common" @@ -74,12 +74,8 @@ func (r *Reconciler) createOrApplyDeploymentFromAsset(esc *operatorv1alpha1.Exte } deploymentName := fmt.Sprintf("%s/%s", deployment.GetNamespace(), deployment.GetName()) - key := types.NamespacedName{ - Name: deployment.GetName(), - Namespace: deployment.GetNamespace(), - } fetched := &appsv1.Deployment{} - exist, err := r.Exists(r.ctx, key, fetched) + exist, err := r.Exists(r.ctx, client.ObjectKeyFromObject(deployment), fetched) if err != nil { return common.FromClientError(err, "failed to check %s deployment resource already exists", deploymentName) } @@ -124,7 +120,7 @@ func (r *Reconciler) getDeploymentObject(assetName string, esc *operatorv1alpha1 case controllerDeploymentAssetName: updateContainerSpec(deployment, esc, image, logLevel) case webhookDeploymentAssetName: - checkInterval := "5m" + var checkInterval string if esc.Spec.ApplicationConfig.WebhookConfig != nil && esc.Spec.ApplicationConfig.WebhookConfig.CertificateCheckInterval != nil { checkInterval = esc.Spec.ApplicationConfig.WebhookConfig.CertificateCheckInterval.Duration.String() @@ -133,7 +129,7 @@ func (r *Reconciler) getDeploymentObject(assetName string, esc *operatorv1alpha1 case certControllerDeploymentAssetName: updateCertControllerContainerSpec(deployment, image, logLevel) case bitwardenDeploymentAssetName: - deployment.Labels["app.kubernetes.io/version"] = bitwardenImageVersionEnvVarName + deployment.Labels["app.kubernetes.io/version"] = os.Getenv(bitwardenImageVersionEnvVarName) updateBitwardenServerContainerSpec(deployment, bitwardenImage) } diff --git a/pkg/controller/external_secrets/install_external_secrets.go b/pkg/controller/external_secrets/install_external_secrets.go index b7b6ba44..388b324a 100644 --- a/pkg/controller/external_secrets/install_external_secrets.go +++ b/pkg/controller/external_secrets/install_external_secrets.go @@ -7,6 +7,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-secrets-operator/api/v1alpha1" "github.com/openshift/external-secrets-operator/pkg/controller/common" @@ -112,12 +113,23 @@ func (r *Reconciler) createOrApplyNamespace(esc 
*operatorv1alpha1.ExternalSecret Labels: resourceLabels, }, } - if err := r.Create(r.ctx, obj); err != nil { - if errors.IsAlreadyExists(err) { - r.log.V(4).Info("namespace already exists", "namespace", namespace) - return nil + + got := &corev1.Namespace{} + err := r.Get(r.ctx, client.ObjectKeyFromObject(obj), got) + if err != nil { + if !errors.IsNotFound(err) { + return fmt.Errorf("failed to fetch %s namespace: %w", namespace, err) + } + if err = r.Create(r.ctx, obj); err != nil { + return fmt.Errorf("failed to create %s namespace: %w", namespace, err) + } + return nil + } + if common.ObjectMetadataModified(obj, got) { + common.UpdateResourceLabels(got, resourceLabels) + if err = r.Update(r.ctx, obj); err != nil { + return fmt.Errorf("failed to update %s namespace with labels: %w", namespace, err) } - return err } return nil } diff --git a/pkg/controller/external_secrets/rbacs.go b/pkg/controller/external_secrets/rbacs.go index 8b47c582..068d5128 100644 --- a/pkg/controller/external_secrets/rbacs.go +++ b/pkg/controller/external_secrets/rbacs.go @@ -5,7 +5,6 @@ import ( corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" - "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-secrets-operator/api/v1alpha1" @@ -111,15 +110,11 @@ func (r *Reconciler) createOrApplyClusterRole(esc *operatorv1alpha1.ExternalSecr var ( exist bool err error - key types.NamespacedName clusterRoleName = obj.GetName() fetched = &rbacv1.ClusterRole{} ) - key = types.NamespacedName{ - Name: clusterRoleName, - } - exist, err = r.Exists(r.ctx, key, fetched) + exist, err = r.Exists(r.ctx, client.ObjectKeyFromObject(obj), fetched) if err != nil { return common.FromClientError(err, "failed to check %s clusterrole resource already exists", clusterRoleName) } @@ -159,15 +154,11 @@ func (r *Reconciler) createOrApplyClusterRoleBinding(esc *operatorv1alpha1.Exter var ( exist bool err error - key types.NamespacedName clusterRoleBindingName = obj.GetName() fetched = &rbacv1.ClusterRoleBinding{} ) r.log.V(4).Info("reconciling clusterrolebinding resource", "name", clusterRoleBindingName) - key = types.NamespacedName{ - Name: clusterRoleBindingName, - } - exist, err = r.Exists(r.ctx, key, fetched) + exist, err = r.Exists(r.ctx, client.ObjectKeyFromObject(obj), fetched) if err != nil { return common.FromClientError(err, "failed to check %s clusterrolebinding resource already exists", clusterRoleBindingName) } @@ -209,11 +200,7 @@ func (r *Reconciler) createOrApplyRole(esc *operatorv1alpha1.ExternalSecretsConf roleName := fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName()) r.log.V(4).Info("reconciling role resource", "name", roleName) fetched := &rbacv1.Role{} - key := types.NamespacedName{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - } - exist, err := r.Exists(r.ctx, key, fetched) + exist, err := r.Exists(r.ctx, client.ObjectKeyFromObject(obj), fetched) if err != nil { return common.FromClientError(err, "failed to check %s role resource already exists", roleName) } @@ -254,11 +241,7 @@ func (r *Reconciler) createOrApplyRoleBinding(esc *operatorv1alpha1.ExternalSecr roleBindingName := fmt.Sprintf("%s/%s", obj.GetNamespace(), obj.GetName()) r.log.V(4).Info("reconciling rolebinding resource", "name", roleBindingName) fetched := &rbacv1.RoleBinding{} - key := types.NamespacedName{ - Name: obj.GetName(), - Namespace: obj.GetNamespace(), - } - exist, err := r.Exists(r.ctx, key, fetched) + exist, err := r.Exists(r.ctx, 
client.ObjectKeyFromObject(obj), fetched) if err != nil { return common.FromClientError(err, "failed to check %s rolebinding resource already exists", roleBindingName) } diff --git a/pkg/controller/external_secrets/secret.go b/pkg/controller/external_secrets/secret.go index 136c55ca..a2bc47ea 100644 --- a/pkg/controller/external_secrets/secret.go +++ b/pkg/controller/external_secrets/secret.go @@ -4,7 +4,7 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-secrets-operator/api/v1alpha1" "github.com/openshift/external-secrets-operator/pkg/controller/common" @@ -29,12 +29,8 @@ func (r *Reconciler) createOrApplySecret(esc *operatorv1alpha1.ExternalSecretsCo secretName := fmt.Sprintf("%s/%s", desired.GetNamespace(), desired.GetName()) r.log.V(4).Info("reconciling secret resource", "name", secretName) fetched := &corev1.Secret{} - key := types.NamespacedName{ - Name: desired.GetName(), - Namespace: desired.GetNamespace(), - } - exist, err := r.Exists(r.ctx, key, fetched) + exist, err := r.Exists(r.ctx, client.ObjectKeyFromObject(desired), fetched) if err != nil { return common.FromClientError(err, "failed to check %s secret resource already exists", secretName) } diff --git a/pkg/controller/external_secrets/serviceaccounts.go b/pkg/controller/external_secrets/serviceaccounts.go index f9601b28..432a9307 100644 --- a/pkg/controller/external_secrets/serviceaccounts.go +++ b/pkg/controller/external_secrets/serviceaccounts.go @@ -4,7 +4,7 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-secrets-operator/api/v1alpha1" "github.com/openshift/external-secrets-operator/pkg/controller/common" @@ -50,13 +50,8 @@ func (r *Reconciler) createOrApplyServiceAccounts(esc *operatorv1alpha1.External serviceAccountName := fmt.Sprintf("%s/%s", desired.GetNamespace(), desired.GetName()) r.log.V(4).Info("reconciling serviceaccount resource", "name", serviceAccountName) - key := types.NamespacedName{ - Name: desired.GetName(), - Namespace: desired.GetNamespace(), - } - fetched := &corev1.ServiceAccount{} - exist, err := r.Exists(r.ctx, key, fetched) + exist, err := r.Exists(r.ctx, client.ObjectKeyFromObject(desired), fetched) if err != nil { return common.FromClientError(err, "failed to check if serviceaccount %s exists", serviceAccountName) } diff --git a/pkg/controller/external_secrets/services.go b/pkg/controller/external_secrets/services.go index 69b893bc..cb023865 100644 --- a/pkg/controller/external_secrets/services.go +++ b/pkg/controller/external_secrets/services.go @@ -4,7 +4,7 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-secrets-operator/api/v1alpha1" "github.com/openshift/external-secrets-operator/pkg/controller/common" @@ -57,11 +57,7 @@ func (r *Reconciler) createOrApplyServiceFromAsset(esc *operatorv1alpha1.Externa r.log.V(4).Info("Reconciling service", "name", serviceName) fetched := &corev1.Service{} - key := types.NamespacedName{ - Name: service.GetName(), - Namespace: service.GetNamespace(), - } - exists, err := r.Exists(r.ctx, key, fetched) + exists, err := r.Exists(r.ctx, client.ObjectKeyFromObject(service), fetched) if err != nil { return common.FromClientError(err, "failed to check existence of service %s", serviceName) } diff 
--git a/pkg/controller/external_secrets/suite_test.go b/pkg/controller/external_secrets/suite_test.go deleted file mode 100644 index 80df5090..00000000 --- a/pkg/controller/external_secrets/suite_test.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2025. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package external_secrets - -import ( - "context" - "fmt" - "path/filepath" - "runtime" - "testing" - - . "github.com/onsi/ginkgo/v2" - . "github.com/onsi/gomega" - - k8sruntime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" - logf "sigs.k8s.io/controller-runtime/pkg/log" - "sigs.k8s.io/controller-runtime/pkg/log/zap" - - operatorv1alpha1 "github.com/openshift/external-secrets-operator/api/v1alpha1" - // +kubebuilder:scaffold:imports -) - -// These tests use Ginkgo (BDD-style Go testing framework). Refer to -// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. - -var ( - cfg *rest.Config - k8sClient client.Client - testEnv *envtest.Environment - ctx context.Context - cancel context.CancelFunc - - scheme = k8sruntime.NewScheme() -) - -func TestControllers(t *testing.T) { - RegisterFailHandler(Fail) - - RunSpecs(t, "Controller Suite") -} - -var _ = BeforeSuite(func() { - logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true))) - - ctx, cancel = context.WithCancel(context.TODO()) - - By("bootstrapping test environment") - testEnv = &envtest.Environment{ - CRDDirectoryPaths: []string{filepath.Join("..", "..", "config", "crd", "bases")}, - ErrorIfCRDPathMissing: true, - - // The BinaryAssetsDirectory is only required if you want to run the tests directly - // without call the makefile target test. If not informed it will look for the - // default path defined in controller-runtime which is /usr/local/kubebuilder/. - // Note that you must have the required binaries setup under the bin directory to perform - // the tests directly. When we run make test it will be setup and used automatically. - BinaryAssetsDirectory: filepath.Join("..", "..", "bin", "k8s", - fmt.Sprintf("1.31.0-%s-%s", runtime.GOOS, runtime.GOARCH)), - } - - var err error - // cfg is defined in this file globally. 
- cfg, err = testEnv.Start() - Expect(err).NotTo(HaveOccurred()) - Expect(cfg).NotTo(BeNil()) - - err = operatorv1alpha1.AddToScheme(scheme) - Expect(err).NotTo(HaveOccurred()) - - // +kubebuilder:scaffold:scheme - - k8sClient, err = client.New(cfg, client.Options{Scheme: scheme}) - Expect(err).NotTo(HaveOccurred()) - Expect(k8sClient).NotTo(BeNil()) - -}) - -var _ = AfterSuite(func() { - By("tearing down the test environment") - cancel() - err := testEnv.Stop() - Expect(err).NotTo(HaveOccurred()) -}) diff --git a/pkg/controller/external_secrets/utils.go b/pkg/controller/external_secrets/utils.go index c7c38b35..be1e2839 100644 --- a/pkg/controller/external_secrets/utils.go +++ b/pkg/controller/external_secrets/utils.go @@ -4,7 +4,6 @@ import ( "context" "fmt" - "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" @@ -54,7 +53,7 @@ func (r *Reconciler) updateCondition(esc *operatorv1alpha1.ExternalSecretsConfig // updateStatus is for updating the status subresource of externalsecretsconfigs.operator.openshift.io. func (r *Reconciler) updateStatus(ctx context.Context, changed *operatorv1alpha1.ExternalSecretsConfig) error { - namespacedName := types.NamespacedName{Name: changed.Name, Namespace: changed.Namespace} + namespacedName := client.ObjectKeyFromObject(changed) if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { r.log.V(4).Info("updating externalsecretsconfigs.operator.openshift.io status", "request", namespacedName) current := &operatorv1alpha1.ExternalSecretsConfig{} @@ -80,7 +79,7 @@ func (r *Reconciler) updateStatus(ctx context.Context, changed *operatorv1alpha1 func (r *Reconciler) validateExternalSecretsConfig(esc *operatorv1alpha1.ExternalSecretsConfig) error { if isCertManagerConfigEnabled(esc) { if _, ok := r.optionalResourcesList[certificateCRDGKV]; !ok { - return fmt.Errorf("spec.certManagerConfig.enabled is set, but cert-manager is not installed") + return fmt.Errorf("spec.controllerConfig.certProvider.certManager.mode is set, but cert-manager is not installed") } } return nil diff --git a/pkg/controller/external_secrets/validatingwebhook.go b/pkg/controller/external_secrets/validatingwebhook.go index 729c15bf..d5013e9d 100644 --- a/pkg/controller/external_secrets/validatingwebhook.go +++ b/pkg/controller/external_secrets/validatingwebhook.go @@ -5,7 +5,7 @@ import ( webhook "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-secrets-operator/api/v1alpha1" "github.com/openshift/external-secrets-operator/pkg/controller/common" @@ -22,10 +22,7 @@ func (r *Reconciler) createOrApplyValidatingWebhookConfiguration(esc *operatorv1 validatingWebhookName := desired.GetName() r.log.V(4).Info("reconciling validatingWebhook resource", "name", validatingWebhookName) fetched := &webhook.ValidatingWebhookConfiguration{} - key := types.NamespacedName{ - Name: desired.GetName(), - } - exist, err := r.Exists(r.ctx, key, fetched) + exist, err := r.Exists(r.ctx, client.ObjectKeyFromObject(desired), fetched) if err != nil { return common.FromClientError(err, "failed to check %s validatingWebhook resource already exists", validatingWebhookName) } diff --git a/pkg/controller/external_secrets_manager/controller.go b/pkg/controller/external_secrets_manager/controller.go index 5422c6ea..8fb4dd4c 100644 --- 
a/pkg/controller/external_secrets_manager/controller.go +++ b/pkg/controller/external_secrets_manager/controller.go @@ -30,6 +30,7 @@ import ( "k8s.io/client-go/util/retry" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/builder" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/event" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" @@ -45,7 +46,7 @@ import ( const ( ControllerName = "external-secrets-manager" - // finalizer name for externalsecretsconfigs.operator.openshift.io resource. + // finalizer name for externalsecretsmanagers.operator.openshift.io resource. finalizer = "externalsecretsmanagers.operator.openshift.io/" + ControllerName ) @@ -225,7 +226,7 @@ func (r *Reconciler) updateStatusCondition(esm *operatorv1alpha1.ExternalSecrets // updateStatus is for updating the status subresource of externalsecretsmanagers.operator.openshift.io. func (r *Reconciler) updateStatus(ctx context.Context, changed *operatorv1alpha1.ExternalSecretsManager) error { - namespacedName := types.NamespacedName{Name: changed.Name, Namespace: changed.Namespace} + namespacedName := client.ObjectKeyFromObject(changed) if err := retry.RetryOnConflict(retry.DefaultRetry, func() error { r.log.V(4).Info("updating externalsecretsmanagers.operator.openshift.io status", "request", namespacedName) current := &operatorv1alpha1.ExternalSecretsManager{} diff --git a/test/apis/README.md b/test/apis/README.md index 4fd7fcac..01b052d4 100644 --- a/test/apis/README.md +++ b/test/apis/README.md @@ -1 +1 @@ -Refer to https://github.com/openshift/api/tree/master/tests for more details. \ No newline at end of file +Refer to the [openshift/api test suite](https://github.com/openshift/api/tree/master/tests) for more details. From 7b13b10046e2a28df54a55f0a39e6b6586449210 Mon Sep 17 00:00:00 2001 From: Bharath B Date: Fri, 3 Oct 2025 13:50:21 +0530 Subject: [PATCH 5/7] ESO-101: fixes GO-2025-3915 vulnerability Signed-off-by: Bharath B --- Makefile | 5 ++--- go.mod | 2 +- go.sum | 4 ++-- vendor/modules.txt | 2 +- 4 files changed, 6 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index f490dd68..ae04e0e6 100644 --- a/Makefile +++ b/Makefile @@ -419,9 +419,8 @@ docs: crd-ref-docs .PHONY: govulnscan #The ignored vulnerabilities are not in the operator code, but in the vendored packages. # - https://pkg.go.dev/vuln/GO-2025-3956 -# - https://pkg.go.dev/vuln/GO-2025-3915 # - https://pkg.go.dev/vuln/GO-2025-3547 -# _ https://pkg.go.dev/vuln/GO-2025-3521 +# - https://pkg.go.dev/vuln/GO-2025-3521 KNOWN_VULNERABILITIES:="GO-2025-3547|GO-2025-3521|GO-2025-3956|GO-2025-3915" govulnscan: govulncheck $(OUTPUTS_PATH) ## Run govulncheck - $(GOVULNCHECK) ./... 
> $(OUTPUTS_PATH)/govulcheck.results 2>&1 @@ -435,4 +434,4 @@ test-apis: envtest ginkgo .PHONY: clean clean: - rm -rf $(LOCALBIN) $(OUTPUTS_PATH) cover.out dist \ No newline at end of file + rm -rf $(LOCALBIN) $(OUTPUTS_PATH) cover.out dist diff --git a/go.mod b/go.mod index e75bcefe..eb2ee2c9 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( k8s.io/apimachinery v0.32.3 k8s.io/client-go v0.32.3 k8s.io/klog/v2 v2.130.1 - k8s.io/kubernetes v1.32.6 + k8s.io/kubernetes v1.32.8 k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 sigs.k8s.io/controller-runtime v0.20.5-0.20250517180713-32e5e9e948a5 sigs.k8s.io/controller-runtime/tools/setup-envtest v0.0.0-20250308055145-5fe7bb3edc86 diff --git a/go.sum b/go.sum index 8e09e6a1..6886bb5e 100644 --- a/go.sum +++ b/go.sum @@ -849,8 +849,8 @@ k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911 h1:gAXU86Fmbr/ktY17lkHwSj k8s.io/kube-openapi v0.0.0-20250701173324-9bd5c66d9911/go.mod h1:GLOk5B+hDbRROvt0X2+hqX64v/zO3vXN7J78OUmBSKw= k8s.io/kubelet v0.32.1 h1:bB91GvMsZb+LfzBxnjPEr1Fal/sdxZtYphlfwAaRJGw= k8s.io/kubelet v0.32.1/go.mod h1:4sAEZ6PlewD0GroV3zscY7llym6kmNNTVmUI/Qshm6w= -k8s.io/kubernetes v1.32.6 h1:tp1gRjOqZjaoFBek5PN6eSmODdS1QRrH5UKiFP8ZByg= -k8s.io/kubernetes v1.32.6/go.mod h1:REY0Gok66BTTrbGyZaFMNKO9JhxvgBDW9B7aksWRFoY= +k8s.io/kubernetes v1.32.8 h1:NePHsWPIT9NQZ9w5QT/chJMuwjFFGGZxalvD6FlOjlw= +k8s.io/kubernetes v1.32.8/go.mod h1:REY0Gok66BTTrbGyZaFMNKO9JhxvgBDW9B7aksWRFoY= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= mvdan.cc/gofumpt v0.7.0 h1:bg91ttqXmi9y2xawvkuMXyvAA/1ZGJqYAEGjXuP0JXU= diff --git a/vendor/modules.txt b/vendor/modules.txt index ccc059bf..a16a0983 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -2540,7 +2540,7 @@ k8s.io/kube-openapi/pkg/validation/strfmt/bson # k8s.io/kubelet v0.32.1 ## explicit; go 1.23.0 k8s.io/kubelet/pkg/apis -# k8s.io/kubernetes v1.32.6 +# k8s.io/kubernetes v1.32.8 ## explicit; go 1.23.0 k8s.io/kubernetes/pkg/api/service k8s.io/kubernetes/pkg/api/v1/service From 361b2006ac52993a973381646579434c56372644 Mon Sep 17 00:00:00 2001 From: Bharath B Date: Fri, 3 Oct 2025 13:51:51 +0530 Subject: [PATCH 6/7] ESO-101: Removes default value from required type field --- api/v1alpha1/external_secrets_config_types.go | 3 +-- api/v1alpha1/zz_generated.deepcopy.go | 6 +++++- .../external-secrets-operator.clusterserviceversion.yaml | 3 ++- .../operator.openshift.io_externalsecretsconfigs.yaml | 1 - .../operator.openshift.io_externalsecretsconfigs.yaml | 1 - config/manager/manager.yaml | 1 + docs/api_reference.md | 9 ++------- pkg/controller/external_secrets/certificate.go | 3 +-- pkg/controller/external_secrets/certificate_test.go | 4 +++- 9 files changed, 15 insertions(+), 16 deletions(-) diff --git a/api/v1alpha1/external_secrets_config_types.go b/api/v1alpha1/external_secrets_config_types.go index 930b1c0d..ade8a5f3 100644 --- a/api/v1alpha1/external_secrets_config_types.go +++ b/api/v1alpha1/external_secrets_config_types.go @@ -157,7 +157,6 @@ type CertManagerConfig struct { // This field is immutable once set. 
// +kubebuilder:validation:XValidation:rule="self == oldSelf",message="mode is immutable once set" // +kubebuilder:validation:Enum:=Enabled;Disabled - // +kubebuilder:default:=Disabled // +kubebuilder:validation:Required Mode Mode `json:"mode,omitempty"` @@ -176,7 +175,7 @@ type CertManagerConfig struct { // +kubebuilder:validation:XValidation:rule="!has(self.kind) || self.kind.lowerAscii() == 'issuer' || self.kind.lowerAscii() == 'clusterissuer'",message="kind must be either 'Issuer' or 'ClusterIssuer'" // +kubebuilder:validation:XValidation:rule="!has(self.group) || self.group.lowerAscii() == 'cert-manager.io'",message="group must be 'cert-manager.io'" // +kubebuilder:validation:Optional - IssuerRef ObjectReference `json:"issuerRef,omitempty"` + IssuerRef *ObjectReference `json:"issuerRef,omitempty"` // certificateDuration is the validity period of the webhook certificate. // +kubebuilder:default:="8760h" diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 6496238a..4474b846 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -70,7 +70,11 @@ func (in *BitwardenSecretManagerProvider) DeepCopy() *BitwardenSecretManagerProv // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CertManagerConfig) DeepCopyInto(out *CertManagerConfig) { *out = *in - out.IssuerRef = in.IssuerRef + if in.IssuerRef != nil { + in, out := &in.IssuerRef, &out.IssuerRef + *out = new(ObjectReference) + **out = **in + } if in.CertificateDuration != nil { in, out := &in.CertificateDuration, &out.CertificateDuration *out = new(v1.Duration) diff --git a/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml b/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml index 4140d32b..b46d38f9 100644 --- a/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml +++ b/bundle/manifests/external-secrets-operator.clusterserviceversion.yaml @@ -220,7 +220,7 @@ metadata: categories: Security console.openshift.io/disable-operand-delete: "true" containerImage: openshift.io/external-secrets-operator:latest - createdAt: "2025-09-26T11:41:31Z" + createdAt: "2025-10-03T05:25:19Z" features.operators.openshift.io/cnf: "false" features.operators.openshift.io/cni: "false" features.operators.openshift.io/csi: "false" @@ -656,6 +656,7 @@ spec: - name: METRICS_SECURE value: "true" image: openshift.io/external-secrets-operator:latest + imagePullPolicy: Always livenessProbe: httpGet: path: /healthz diff --git a/bundle/manifests/operator.openshift.io_externalsecretsconfigs.yaml b/bundle/manifests/operator.openshift.io_externalsecretsconfigs.yaml index 482135f6..16f4b5b1 100644 --- a/bundle/manifests/operator.openshift.io_externalsecretsconfigs.yaml +++ b/bundle/manifests/operator.openshift.io_externalsecretsconfigs.yaml @@ -1239,7 +1239,6 @@ spec: rule: '!has(self.group) || self.group.lowerAscii() == ''cert-manager.io''' mode: - default: Disabled description: |- mode indicates whether to use cert-manager for certificate management, instead of built-in cert-controller. Enabled: Makes use of cert-manager for obtaining the certificates for webhook server and other components. 
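
As an illustrative aside (not part of the patch series): a minimal, self-contained sketch of the issuerRef change in PATCH 6/7, using simplified stand-in types rather than the real operatorv1alpha1 structs. Turning IssuerRef into a pointer lets callers replace the reflect-based emptiness check with an explicit nil check; the error strings below mirror the ones used in certificate.go, everything else is an assumption of this sketch.

package main

import (
	"errors"
	"fmt"
	"reflect"
)

// Stand-in for the ObjectReference API type (assumption: reduced to the
// fields referenced by the validation below).
type objectReference struct {
	Name  string
	Kind  string
	Group string
}

// Before the patch: IssuerRef is a value, so "unset" is the zero value.
type certManagerConfigOld struct {
	IssuerRef objectReference
}

// After the patch: IssuerRef is a pointer, so "unset" is nil.
type certManagerConfigNew struct {
	IssuerRef *objectReference
}

func validateOld(c certManagerConfigOld) error {
	// Emptiness of a struct value needs reflection.
	if reflect.ValueOf(c.IssuerRef).IsZero() {
		return errors.New("cert-manager is enabled but issuerRef is not configured")
	}
	return nil
}

func validateNew(c certManagerConfigNew) error {
	// A pointer makes the unset case an explicit nil check.
	if c.IssuerRef == nil {
		return errors.New("cert-manager is enabled but issuerRef is not configured")
	}
	if c.IssuerRef.Name == "" {
		return errors.New("cert-manager.issuerRef.name is not configured")
	}
	return nil
}

func main() {
	fmt.Println(validateOld(certManagerConfigOld{}))                              // zero value caught via reflection
	fmt.Println(validateNew(certManagerConfigNew{}))                              // unset: nil pointer
	fmt.Println(validateNew(certManagerConfigNew{IssuerRef: &objectReference{}})) // set, but name missing
}
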
diff --git a/config/crd/bases/operator.openshift.io_externalsecretsconfigs.yaml b/config/crd/bases/operator.openshift.io_externalsecretsconfigs.yaml index 9ae490e8..a8cd6df0 100644 --- a/config/crd/bases/operator.openshift.io_externalsecretsconfigs.yaml +++ b/config/crd/bases/operator.openshift.io_externalsecretsconfigs.yaml @@ -1239,7 +1239,6 @@ spec: rule: '!has(self.group) || self.group.lowerAscii() == ''cert-manager.io''' mode: - default: Disabled description: |- mode indicates whether to use cert-manager for certificate management, instead of built-in cert-controller. Enabled: Makes use of cert-manager for obtaining the certificates for webhook server and other components. diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 0bebe399..6eb021e1 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -94,6 +94,7 @@ spec: - name: METRICS_SECURE value: "true" image: controller:latest + imagePullPolicy: Always name: manager securityContext: readOnlyRootFilesystem: true diff --git a/docs/api_reference.md b/docs/api_reference.md index e139176f..6d7b665d 100644 --- a/docs/api_reference.md +++ b/docs/api_reference.md @@ -69,7 +69,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | -| `mode` _[Mode](#mode)_ | mode indicates whether to use cert-manager for certificate management, instead of built-in cert-controller.
Enabled: Makes use of cert-manager for obtaining the certificates for webhook server and other components.
Disabled: Makes use of in-built cert-controller for obtaining the certificates for webhook server, which is the default behavior.
This field is immutable once set. | Disabled | Enum: [Enabled Disabled]
Required: \{\}
| +| `mode` _[Mode](#mode)_ | mode indicates whether to use cert-manager for certificate management, instead of built-in cert-controller.
Enabled: Makes use of cert-manager for obtaining the certificates for webhook server and other components.
Disabled: Makes use of in-built cert-controller for obtaining the certificates for webhook server, which is the default behavior.
This field is immutable once set. | | Enum: [Enabled Disabled]
Required: \{\}
| | `injectAnnotations` _string_ | injectAnnotations is for adding the `cert-manager.io/inject-ca-from` annotation to the webhooks and CRDs to automatically setup webhook to use the cert-manager CA. This requires CA Injector to be enabled in cert-manager.
Use `true` or `false` to indicate the preference. This field is immutable once set. | false | Enum: [true false]
Optional: \{\}
| | `issuerRef` _ObjectReference_ | issuerRef contains details of the referenced object used for obtaining certificates.
When `issuerRef.Kind` is `Issuer`, it must exist in the `external-secrets` namespace.
This field is immutable once set. | | Optional: \{\}
| | `certificateDuration` _[Duration](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.32/#duration-v1-meta)_ | certificateDuration is the validity period of the webhook certificate. | 8760h | Optional: \{\}
| @@ -378,7 +378,7 @@ _Appears in:_ #### ObjectReference - +_Underlying type:_ _[struct{Name string "json:\"name\""; Kind string "json:\"kind,omitempty\""; Group string "json:\"group,omitempty\""}](#struct{name-string-"json:\"name\"";-kind-string-"json:\"kind,omitempty\"";-group-string-"json:\"group,omitempty\""})_ ObjectReference is a reference to an object with a given name, kind and group. @@ -387,11 +387,6 @@ ObjectReference is a reference to an object with a given name, kind and group. _Appears in:_ - [CertManagerConfig](#certmanagerconfig) -| Field | Description | Default | Validation | -| --- | --- | --- | --- | -| `name` _string_ | Name of the resource being referred to. | | MaxLength: 253
MinLength: 1
Required: \{\}
| -| `kind` _string_ | Kind of the resource being referred to. | | MaxLength: 253
MinLength: 1
Optional: \{\}
| -| `group` _string_ | Group of the resource being referred to. | | MaxLength: 253
MinLength: 1
Optional: \{\}
| #### PluginsConfig diff --git a/pkg/controller/external_secrets/certificate.go b/pkg/controller/external_secrets/certificate.go index e90941e7..955d0719 100644 --- a/pkg/controller/external_secrets/certificate.go +++ b/pkg/controller/external_secrets/certificate.go @@ -2,7 +2,6 @@ package external_secrets import ( "fmt" - "reflect" "strings" corev1 "k8s.io/api/core/v1" @@ -98,7 +97,7 @@ func (r *Reconciler) updateCertificateParams(esc *operatorv1alpha1.ExternalSecre if esc.Spec.ControllerConfig.CertProvider != nil && esc.Spec.ControllerConfig.CertProvider.CertManager != nil { certManageConfig = esc.Spec.ControllerConfig.CertProvider.CertManager } - if reflect.ValueOf(certManageConfig.IssuerRef).IsZero() { + if certManageConfig.IssuerRef == nil { return fmt.Errorf("cert-manager is enabled but issuerRef is not configured") } if certManageConfig.IssuerRef.Name == "" { diff --git a/pkg/controller/external_secrets/certificate_test.go b/pkg/controller/external_secrets/certificate_test.go index 2fbfc114..580ac7ae 100644 --- a/pkg/controller/external_secrets/certificate_test.go +++ b/pkg/controller/external_secrets/certificate_test.go @@ -471,7 +471,9 @@ func testExternalSecretsConfigForCertificate() *v1alpha1.ExternalSecretsConfig { esc.Spec = v1alpha1.ExternalSecretsConfigSpec{ ControllerConfig: v1alpha1.ControllerConfig{ CertProvider: &v1alpha1.CertProvidersConfig{ - CertManager: &v1alpha1.CertManagerConfig{}, + CertManager: &v1alpha1.CertManagerConfig{ + IssuerRef: &v1alpha1.ObjectReference{}, + }, }, }, ApplicationConfig: v1alpha1.ApplicationConfig{ From 314e1fec69660bea294472ff22f38a912e17cdc0 Mon Sep 17 00:00:00 2001 From: Bharath B Date: Fri, 3 Oct 2025 13:52:35 +0530 Subject: [PATCH 7/7] ESO-101: Uses logLevel configured in ExternalSecretsManager object too --- .../external_secrets/deployments.go | 4 ++-- .../install_external_secrets.go | 24 +++++-------------- pkg/controller/external_secrets/utils.go | 12 +++++++--- 3 files changed, 17 insertions(+), 23 deletions(-) diff --git a/pkg/controller/external_secrets/deployments.go b/pkg/controller/external_secrets/deployments.go index 32ed82c0..3d1e7468 100644 --- a/pkg/controller/external_secrets/deployments.go +++ b/pkg/controller/external_secrets/deployments.go @@ -114,13 +114,13 @@ func (r *Reconciler) getDeploymentObject(assetName string, esc *operatorv1alpha1 if bitwardenImage == "" { return nil, common.NewIrrecoverableError(fmt.Errorf("%s environment variable with bitwarden-sdk-server image not set", bitwardenImageEnvVarName), "failed to update image in %s deployment object", deployment.GetName()) } - logLevel := getLogLevel(esc.Spec) + logLevel := getLogLevel(esc, r.esm) switch assetName { case controllerDeploymentAssetName: updateContainerSpec(deployment, esc, image, logLevel) case webhookDeploymentAssetName: - var checkInterval string + checkInterval := "5m" if esc.Spec.ApplicationConfig.WebhookConfig != nil && esc.Spec.ApplicationConfig.WebhookConfig.CertificateCheckInterval != nil { checkInterval = esc.Spec.ApplicationConfig.WebhookConfig.CertificateCheckInterval.Duration.String() diff --git a/pkg/controller/external_secrets/install_external_secrets.go b/pkg/controller/external_secrets/install_external_secrets.go index 388b324a..8357569e 100644 --- a/pkg/controller/external_secrets/install_external_secrets.go +++ b/pkg/controller/external_secrets/install_external_secrets.go @@ -7,7 +7,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - 
"sigs.k8s.io/controller-runtime/pkg/client" operatorv1alpha1 "github.com/openshift/external-secrets-operator/api/v1alpha1" "github.com/openshift/external-secrets-operator/pkg/controller/common" @@ -109,27 +108,16 @@ func (r *Reconciler) createOrApplyNamespace(esc *operatorv1alpha1.ExternalSecret namespace := getNamespace(esc) obj := &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ - Name: namespace, - Labels: resourceLabels, + Name: namespace, }, } - got := &corev1.Namespace{} - err := r.Get(r.ctx, client.ObjectKeyFromObject(obj), got) - if err != nil { - if !errors.IsNotFound(err) { - return fmt.Errorf("failed to fetch %s namespace: %w", namespace, err) - } - if err = r.Create(r.ctx, obj); err != nil { - return fmt.Errorf("failed to create %s namespace: %w", namespace, err) - } - return nil - } - if common.ObjectMetadataModified(obj, got) { - common.UpdateResourceLabels(got, resourceLabels) - if err = r.Update(r.ctx, obj); err != nil { - return fmt.Errorf("failed to update %s namespace with labels: %w", namespace, err) + if err := r.Create(r.ctx, obj); err != nil { + if errors.IsAlreadyExists(err) { + r.log.V(4).Info("namespace already exists", "namespace", namespace) + return nil } + return fmt.Errorf("failed to create %s namespace: %w", namespace, err) } return nil } diff --git a/pkg/controller/external_secrets/utils.go b/pkg/controller/external_secrets/utils.go index be1e2839..b68348bf 100644 --- a/pkg/controller/external_secrets/utils.go +++ b/pkg/controller/external_secrets/utils.go @@ -98,10 +98,16 @@ func isBitwardenConfigEnabled(esc *operatorv1alpha1.ExternalSecretsConfig) bool common.EvalMode(esc.Spec.Plugins.BitwardenSecretManagerProvider.Mode) } -func getLogLevel(config operatorv1alpha1.ExternalSecretsConfigSpec) string { - switch config.ApplicationConfig.LogLevel { +func getLogLevel(esc *operatorv1alpha1.ExternalSecretsConfig, esm *operatorv1alpha1.ExternalSecretsManager) string { + var logLevel int32 = 1 + if esc.Spec.ApplicationConfig.LogLevel != 0 { + logLevel = esc.Spec.ApplicationConfig.LogLevel + } else if esm.Spec.GlobalConfig != nil && esm.Spec.GlobalConfig.LogLevel != 0 { + logLevel = esm.Spec.GlobalConfig.LogLevel + } + switch logLevel { case 0, 1, 2: - return zapcore.Level(config.ApplicationConfig.LogLevel).String() + return zapcore.Level(logLevel).String() case 4, 5: return zapcore.DebugLevel.String() }