From 51b873eb21573258779469f28c8a7f9dd311e66e Mon Sep 17 00:00:00 2001 From: Bryan Cox Date: Fri, 5 Sep 2025 10:06:48 -0400 Subject: [PATCH 1/9] deps: upgrade Kubernetes dependencies to v0.33.4 - Update core Kubernetes dependencies from v0.32.3 to v0.33.4: - k8s.io/api, k8s.io/apimachinery, k8s.io/client-go - k8s.io/apiserver, k8s.io/cli-runtime, k8s.io/kubectl - k8s.io/apiextensions-apiserver, k8s.io/component-base - Upgrade prometheus/client_golang from v1.19.1 to v1.22.0 - Update cel.dev/expr from v0.18.0 to v0.19.1 - Upgrade google/cel-go from v0.22.0 to v0.23.2 - Update golang.org/x/time from v0.8.0 to v0.9.0 - Upgrade gRPC from v1.67.3 to v1.68.1 - Update OpenTelemetry packages to v1.33.0 - Refresh k8s.io/utils and other indirect dependencies - Update kube-openapi and structured-merge-diff versions --- go.mod | 57 ++++++++++--------- go.sum | 175 ++++++++++++++++++++++++++++++++++----------------------- 2 files changed, 136 insertions(+), 96 deletions(-) diff --git a/go.mod b/go.mod index 12c73d1408..12a2d93720 100644 --- a/go.mod +++ b/go.mod @@ -55,16 +55,16 @@ require ( golang.org/x/net v0.43.0 golang.org/x/text v0.28.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.32.3 - k8s.io/apiextensions-apiserver v0.32.3 - k8s.io/apimachinery v0.32.3 - k8s.io/apiserver v0.32.3 - k8s.io/cli-runtime v0.32.3 - k8s.io/client-go v0.32.3 - k8s.io/component-base v0.32.3 + k8s.io/api v0.33.4 + k8s.io/apiextensions-apiserver v0.33.4 + k8s.io/apimachinery v0.33.4 + k8s.io/apiserver v0.33.4 + k8s.io/cli-runtime v0.33.4 + k8s.io/client-go v0.33.4 + k8s.io/component-base v0.33.4 k8s.io/klog/v2 v2.130.1 - k8s.io/kubectl v0.32.3 - k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 + k8s.io/kubectl v0.33.4 + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d sigs.k8s.io/aws-iam-authenticator v0.6.13 sigs.k8s.io/cluster-api v1.10.2 sigs.k8s.io/cluster-api/test v1.10.2 @@ -95,7 +95,7 @@ require ( require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect - cel.dev/expr v0.18.0 // indirect + cel.dev/expr v0.19.1 // indirect dario.cat/mergo v1.0.1 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect @@ -110,7 +110,7 @@ require ( github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/adrg/xdg v0.5.3 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 // indirect + github.com/aws/aws-sdk-go v1.55.7 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect @@ -167,10 +167,9 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang/glog v1.2.5 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect - github.com/google/cel-go v0.22.0 // indirect - github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 // indirect + github.com/google/cel-go v0.23.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-github/v53 v53.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f // indirect @@ -178,8 +177,8 @@ require ( github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect github.com/google/uuid v1.6.0 // indirect 
github.com/gorilla/css v1.0.1 // indirect - github.com/gorilla/websocket v1.5.3 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/go-version v1.6.0 // indirect github.com/huandu/xstrings v1.5.0 // indirect @@ -188,6 +187,7 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kylelemons/godebug v1.1.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect @@ -232,14 +232,16 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/zalando/go-keyring v0.2.3 // indirect gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect - go.opentelemetry.io/otel v1.29.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect - go.opentelemetry.io/otel/metric v1.29.0 // indirect - go.opentelemetry.io/otel/sdk v1.29.0 // indirect - go.opentelemetry.io/otel/trace v1.29.0 // indirect - go.opentelemetry.io/proto/otlp v1.3.1 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect + go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.33.0 // indirect + go.opentelemetry.io/otel/sdk v1.33.0 // indirect + go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect @@ -258,9 +260,10 @@ require ( gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/cluster-bootstrap v0.32.3 // indirect - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/kind v0.27.0 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect ) diff --git a/go.sum b/go.sum index 4640ca0a50..4eff269ba5 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= -cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= +cel.dev/expr v0.19.1/go.mod 
h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= @@ -43,8 +43,6 @@ github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4t github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535 h1:4daAzAu0S6Vi7/lbWECcX0j45yZReDZ56BQsrVBOEEY= -github.com/asaskevich/govalidator v0.0.0-20200428143746-21a406dcc535/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/aws/amazon-vpc-cni-k8s v1.15.5 h1:/mqTXB4HoGYg4CiU4Gco9iEvZ+V/309Na4HEMPgok5Q= github.com/aws/amazon-vpc-cni-k8s v1.15.5/go.mod h1:jV4wNtmgT2Ra1/oZU99DPOFsCUKnf0mYfIyzDyAUVAY= github.com/aws/aws-lambda-go v1.41.0 h1:l/5fyVb6Ud9uYd411xdHZzSf2n86TakxzpvIoz7l+3Y= @@ -194,8 +192,6 @@ github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4 github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 h1:7QPwrLT79GlD5sizHf27aoY2RTvw62mO6x7mxkScNk0= github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46/go.mod h1:esf2rsHFNlZlxsqsZDojNBcnNs5REqIvRrWRHqX0vEU= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/dvsekhvalnov/jose2go v1.6.0 h1:Y9gnSnP4qEI0+/uQkHvFXeD2PLPJeXEL+ySMEA2EjTY= github.com/dvsekhvalnov/jose2go v1.6.0/go.mod h1:QsHjhyTlD/lAVqn/NSbVZmSCGeDehTB/mPZadG+mhXU= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= @@ -281,10 +277,10 @@ github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= -github.com/google/cel-go v0.22.0 h1:b3FJZxpiv1vTMo2/5RDUqAHPxkT8mmMfJIrq1llbf7g= -github.com/google/cel-go v0.22.0/go.mod h1:BuznPXXfQDpXKWQ9sPW3TzlAJN5zzFe+i9tIs0yC4s8= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49 h1:0VpGH+cDhbDtdcweoyCVsF3fhN8kejK6rFe/2FFX2nU= -github.com/google/gnostic-models v0.6.9-0.20230804172637-c7be7c783f49/go.mod h1:BkkQ4L1KS1xMt2aWSPStnn55ChGC0DPOn2FQYj+f25M= +github.com/google/cel-go v0.23.2 h1:UdEe3CvQh3Nv+E/j9r1Y//WO0K0cSyD7/y0bzyLIMI4= +github.com/google/cel-go v0.23.2/go.mod h1:52Pb6QsDbC5kvgxvZhiL9QX1oZEkcUF/ZqaPx1J5Wwo= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -316,16 +312,12 @@ github.com/google/uuid v1.6.0 
h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
-github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
-github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
-github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
+github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
-github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 h1:bkypFPDjIYGfCYD5mRBvpqxfYX1YCS1PXdKYWi8FsN0=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0/go.mod h1:P+Lt/0by1T8bfcF3z737NnSbmxQAppXMRziHUxPOC8k=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU=
github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c/go.mod h1:NMPJylDgVpX0MLRlPy15sqSwOFv/U1GZ2m21JhFfek0=
github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
@@ -395,8 +387,6 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
-github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4=
-github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
@@ -407,8 +397,11 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -422,12 +415,15 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
-github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -519,6 +515,7 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
-github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
+github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
+github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
+github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
+github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
@@ -528,17 +525,33 @@ github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2
-github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
-github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
-github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
+github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
+github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
+github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
+github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
-github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
-github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
-github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
@@ -561,8 +574,6 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
-github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js=
-github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
@@ -581,9 +592,13 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
-github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
-github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ 
-597,8 +612,6 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= -github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= @@ -608,8 +621,6 @@ github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcY github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510 h1:S2dVYn90KE98chqDkyE9Z4N61UnQd+KOfgp5Iu53llk= -github.com/xiang90/probing v0.0.0-20221125231312-a49e3df8f510/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= @@ -621,34 +632,27 @@ github.com/zgalor/weberr v0.8.2/go.mod h1:cqK89mj84q3PRgqQXQFWJDzCorOd8xOtov/ulO github.com/ziutek/telnet v0.0.0-20180329124119-c3b780dc415b/go.mod h1:IZpXDfkJ6tWD3PhBK5YzgQT+xJWh7OsdwiG8hA2MkO4= gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a h1:DxppxFKRqJ8WD6oJ3+ZXKDY0iMONQDl5UTg2aTyHh8k= gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a/go.mod h1:NREvu3a57BaK0R1+ztrEzHWiZAihohNLQ6trPxlIqZI= -go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= -go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= -go.etcd.io/etcd/api/v3 v3.5.20 h1:aKfz3nPZECWoZJXMSH9y6h2adXjtOHaHTGEVCuCmaz0= -go.etcd.io/etcd/api/v3 v3.5.20/go.mod h1:QqKGViq4KTgOG43dr/uH0vmGWIaoJY3ggFi6ZH0TH/U= -go.etcd.io/etcd/client/pkg/v3 v3.5.20 h1:sZIAtra+xCo56gdf6BR62to/hiie5Bwl7hQIqMzVTEM= -go.etcd.io/etcd/client/pkg/v3 v3.5.20/go.mod h1:qaOi1k4ZA9lVLejXNvyPABrVEe7VymMF2433yyRQ7O0= -go.etcd.io/etcd/client/v2 v2.305.16 h1:kQrn9o5czVNaukf2A2At43cE9ZtWauOtf9vRZuiKXow= -go.etcd.io/etcd/client/v2 v2.305.16/go.mod h1:h9YxWCzcdvZENbfzBTFCnoNumr2ax3F19sKMqHFmXHE= -go.etcd.io/etcd/client/v3 v3.5.20 h1:jMT2MwQEhyvhQg49Cec+1ZHJzfUf6ZgcmV0GjPv0tIQ= -go.etcd.io/etcd/client/v3 v3.5.20/go.mod h1:J5lbzYRMUR20YolS5UjlqqMcu3/wdEvG5VNBhzyo3m0= -go.etcd.io/etcd/pkg/v3 v3.5.16 h1:cnavs5WSPWeK4TYwPYfmcr3Joz9BH+TZ6qoUtz6/+mc= -go.etcd.io/etcd/pkg/v3 v3.5.16/go.mod h1:+lutCZHG5MBBFI/U4eYT5yL7sJfnexsoM20Y0t2uNuY= -go.etcd.io/etcd/raft/v3 v3.5.16 h1:zBXA3ZUpYs1AwiLGPafYAKKl/CORn/uaxYDwlNwndAk= -go.etcd.io/etcd/raft/v3 v3.5.16/go.mod h1:P4UP14AxofMJ/54boWilabqqWoW9eLodl6I5GdGzazI= -go.etcd.io/etcd/server/v3 v3.5.16 
h1:d0/SAdJ3vVsZvF8IFVb1k8zqMZ+heGcNfft71ul9GWE=
-go.etcd.io/etcd/server/v3 v3.5.16/go.mod h1:ynhyZZpdDp1Gq49jkUg5mfkDWZwXnn3eIqCqtJnrD/s=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8=
-go.opentelemetry.io/otel v1.29.0 h1:PdomN/Al4q/lN6iBJEN3AwPvUiHPMlt93c8bqTG5Llw=
-go.opentelemetry.io/otel v1.29.0/go.mod h1:N/WtXPs1CNCUEx+Agz5uouwCba+i+bJGFicT8SR4NP8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
+go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8=
+go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY=
+go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc=
+go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs=
+go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY=
+go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU=
+go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
+go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
+go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
+go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY=
-go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
-go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
-go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
@@ -661,6 +665,16 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
+go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
+go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
+go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
+go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
+go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
@@ -770,10 +784,17 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
-golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
-golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
+golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -804,8 +825,6 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 h1:ToEetK57OidYuqD4Q5w+vfEnPvPpuTwedCNVohYJfNk=
-google.golang.org/genproto v0.0.0-20241118233622-e639e219e697/go.mod h1:JJrvXBWRZaFMxBufik1a4RpFw4HhgVtBBWQeQgUj2cc=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A=
@@ -814,8 +833,8 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8=
-google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s=
+google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
+google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -839,8 +858,6 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
-gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -857,6 +874,7 @@ gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
-k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
+k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk=
+k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc=
+k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU=
+k8s.io/apiextensions-apiserver v0.33.4/go.mod h1:mWXcZQkQV1GQyxeIjYApuqsn/081hhXPZwZ2URuJeSs=
+k8s.io/apimachinery v0.33.4 h1:SOf/JW33TP0eppJMkIgQ+L6atlDiP/090oaX0y9pd9s=
+k8s.io/apimachinery v0.33.4/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/apiserver v0.33.4 h1:6N0TEVA6kASUS3owYDIFJjUH6lgN8ogQmzZvaFFj1/Y=
+k8s.io/apiserver v0.33.4/go.mod h1:8ODgXMnOoSPLMUg1aAzMFx+7wTJM+URil+INjbTZCok=
@@ -870,20 +888,36 @@ k8s.io/cli-runtime v0.32.3 h1:khLF2ivU2T6Q77H97atx3REY9tXiA3OLOjWJxUrdvss=
-k8s.io/cli-runtime v0.32.3/go.mod h1:vZT6dZq7mZAca53rwUfdFSZjdtLyfF61mkf/8q+Xjak=
-k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
-k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
+k8s.io/cli-runtime v0.33.4 h1:V8NSxGfh24XzZVhXmIGzsApdBpGq0RQS2u/Fz1GvJwk=
+k8s.io/cli-runtime v0.33.4/go.mod h1:V+ilyokfqjT5OI+XE+O515K7jihtr0/uncwoyVqXaIU=
+k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw=
+k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY=
k8s.io/cluster-bootstrap v0.32.3 h1:AqIpsUhB6MUeaAsl1WvaUw54AHRd2hfZrESlKChtd8s=
k8s.io/cluster-bootstrap v0.32.3/go.mod h1:CHbBwgOb6liDV6JFUTkx5t85T2xidy0sChBDoyYw344=
-k8s.io/component-base v0.32.3 h1:98WJvvMs3QZ2LYHBzvltFSeJjEx7t5+8s71P7M74u8k=
-k8s.io/component-base v0.32.3/go.mod h1:LWi9cR+yPAv7cu2X9rZanTiFKB2kHA+JjmhkKjCZRpI=
+k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY=
+k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y=
-k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4=
-k8s.io/kubectl v0.32.3 h1:VMi584rbboso+yjfv0d8uBHwwxbC438LKq+dXd5tOAI=
-k8s.io/kubectl v0.32.3/go.mod h1:6Euv2aso5GKzo/UVMacV6C7miuyevpfI91SvBvV9Zdg=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4=
+k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8=
+k8s.io/kubectl v0.33.4 h1:nXEI6Vi+oB9hXxoAHyHisXolm/l1qutK3oZQMak4N98=
+k8s.io/kubectl v0.33.4/go.mod h1:Xe7P9X4DfILvKmlBsVqUtzktkI56lEj22SJW7cFy6nE=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
-k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0 h1:CPT0ExVicCzcpeN4baWEV2ko2Z/AsiZgEdwgcfwLgMo=
-sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
+k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0=
+k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/aws-iam-authenticator v0.6.13 h1:QSQcAkpt/hF97Ogyoz6sj3WD2twTd2cmxFb4e6Rs9gA=
sigs.k8s.io/aws-iam-authenticator v0.6.13/go.mod h1:CnvFyzR/xeLHmUY/BD0qW6q0wp6KIwXmFp4eTfrHdP8=
sigs.k8s.io/cluster-api v1.10.2 h1:xfvtNu4Fy/41grL0ryH5xSKQjpJEWdO8HiV2lPCCozQ=
@@ -896,7 +930,10 @@ sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA= 
sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= From 45f04513e98177e70c93110aa5138e6e541a6658 Mon Sep 17 00:00:00 2001 From: Bryan Cox Date: Fri, 5 Sep 2025 10:28:29 -0400 Subject: [PATCH 2/9] deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0 - Upgrade cluster-api from v1.10.2 to v1.11.1 - Upgrade controller-runtime from v0.20.4 to v0.21.0 - Update various golang.org/x/* packages - Update testing dependencies (ginkgo, gomega) - Update OpenTelemetry and other indirect dependencies --- go.mod | 73 ++++++-------- go.sum | 165 +++++++++++++++++++++++--------- test/e2e/data/e2e_conf.yaml | 18 ++-- test/e2e/data/e2e_eks_conf.yaml | 18 ++-- versions.mk | 2 +- 5 files changed, 168 insertions(+), 108 deletions(-) diff --git a/go.mod b/go.mod index 12a2d93720..d34e8ef4ca 100644 --- a/go.mod +++ b/go.mod @@ -29,26 +29,23 @@ require ( github.com/aws/smithy-go v1.22.5 github.com/awslabs/goformation/v4 v4.19.5 github.com/blang/semver v3.5.1+incompatible - github.com/coreos/ignition v0.35.0 - github.com/coreos/ignition/v2 v2.16.2 - github.com/go-logr/logr v1.4.2 + github.com/go-logr/logr v1.4.3 github.com/gofrs/flock v0.8.1 github.com/golang/mock v1.6.0 github.com/google/go-cmp v0.7.0 github.com/google/goexpect v0.0.0-20210430020637-ab937bf7fd6f github.com/google/gofuzz v1.2.0 - github.com/onsi/ginkgo/v2 v2.23.3 - github.com/onsi/gomega v1.36.3 - github.com/openshift-online/ocm-api-model/clientapi v0.0.431 + github.com/onsi/ginkgo/v2 v2.23.4 + github.com/onsi/gomega v1.38.0 github.com/openshift-online/ocm-common v0.0.31 github.com/openshift-online/ocm-sdk-go v0.1.476 - github.com/openshift/rosa v1.99.9-testing.0.20250926125556-7903b7e2b476 + github.com/openshift/rosa v1.2.55 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.23.0 github.com/sergi/go-diff v1.3.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.6 + github.com/spf13/pflag v1.0.7 github.com/zgalor/weberr v0.8.2 go.uber.org/mock v0.5.2 golang.org/x/crypto v0.41.0 @@ -58,7 +55,6 @@ require ( k8s.io/api v0.33.4 k8s.io/apiextensions-apiserver v0.33.4 k8s.io/apimachinery v0.33.4 - k8s.io/apiserver v0.33.4 k8s.io/cli-runtime v0.33.4 k8s.io/client-go v0.33.4 k8s.io/component-base v0.33.4 @@ -66,10 +62,10 @@ require ( k8s.io/kubectl v0.33.4 k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d sigs.k8s.io/aws-iam-authenticator v0.6.13 - sigs.k8s.io/cluster-api v1.10.2 - sigs.k8s.io/cluster-api/test v1.10.2 - sigs.k8s.io/controller-runtime v0.20.4 - sigs.k8s.io/yaml v1.4.0 + sigs.k8s.io/cluster-api v1.11.1 + sigs.k8s.io/cluster-api/test v1.11.1 + sigs.k8s.io/controller-runtime v0.21.0 + sigs.k8s.io/yaml v1.6.0 ) require ( @@ -96,21 +92,16 @@ require 
( require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect cel.dev/expr v0.19.1 // indirect - dario.cat/mergo v1.0.1 // indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect github.com/BurntSushi/toml v1.4.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect - github.com/Masterminds/goutils v1.1.1 // indirect - github.com/Masterminds/semver/v3 v3.3.0 // indirect - github.com/Masterminds/sprig/v3 v3.3.0 // indirect github.com/Microsoft/go-winio v0.5.0 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/adrg/xdg v0.5.3 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/aws/aws-sdk-go v1.55.7 // indirect github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 // indirect github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect @@ -135,15 +126,13 @@ require ( github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.6.1 // indirect - github.com/coreos/go-semver v0.3.1 // indirect - github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect - github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect github.com/danieljoos/wincred v1.2.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v28.0.2+incompatible // indirect + github.com/docker/docker v28.3.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect @@ -173,15 +162,13 @@ require ( github.com/google/go-github/v53 v53.2.0 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f // indirect - github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect - github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/css v1.0.1 // indirect github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect github.com/hashicorp/go-version v1.6.0 // indirect - github.com/huandu/xstrings v1.5.0 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect @@ -196,9 +183,9 @@ require ( github.com/microcosm-cc/bluemonday v1.0.27 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/spdystream v0.5.0 // indirect + github.com/moby/sys/sequential v0.6.0 // 
indirect github.com/moby/term v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -219,51 +206,53 @@ require ( github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b // indirect github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522 // indirect - github.com/shopspring/decimal v1.4.0 // indirect github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 // indirect github.com/sourcegraph/conc v0.3.0 // indirect github.com/spf13/afero v1.12.0 // indirect github.com/spf13/cast v1.7.1 // indirect - github.com/spf13/viper v1.20.0 // indirect + github.com/spf13/viper v1.20.1 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect - github.com/valyala/fastjson v1.6.4 // indirect - github.com/vincent-petithory/dataurl v1.0.0 // indirect github.com/x448/float16 v0.8.4 // indirect github.com/zalando/go-keyring v0.2.3 // indirect gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect - go.opentelemetry.io/otel v1.33.0 // indirect + go.opentelemetry.io/otel v1.34.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect - go.opentelemetry.io/otel/metric v1.33.0 // indirect - go.opentelemetry.io/otel/sdk v1.33.0 // indirect - go.opentelemetry.io/otel/trace v1.33.0 // indirect + go.opentelemetry.io/otel/metric v1.34.0 // indirect + go.opentelemetry.io/otel/sdk v1.34.0 // indirect + go.opentelemetry.io/otel/trace v1.34.0 // indirect go.opentelemetry.io/proto/otlp v1.4.0 // indirect + go.uber.org/automaxprocs v1.6.0 // indirect go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect + golang.org/x/net v0.42.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.16.0 // indirect golang.org/x/sys v0.35.0 // indirect golang.org/x/term v0.34.0 // indirect - golang.org/x/time v0.8.0 // indirect + golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.35.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect - google.golang.org/grpc v1.67.3 // indirect - google.golang.org/protobuf v1.36.8 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect + google.golang.org/grpc v1.71.3 // indirect + google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/cluster-bootstrap v0.32.3 // indirect + k8s.io/apiserver v0.33.4 // indirect + k8s.io/cluster-bootstrap v0.33.3 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - 
sigs.k8s.io/kind v0.27.0 // indirect
+	sigs.k8s.io/kind v0.30.0 // indirect
 	sigs.k8s.io/randfill v1.0.0 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 4eff269ba5..997a9b1618 100644
--- a/go.sum
+++ b/go.sum
@@ -20,7 +20,11 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
-github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0=
github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
@@ -47,8 +51,6 @@ github.com/aws/amazon-vpc-cni-k8s v1.15.5 h1:/mqTXB4HoGYg4CiU4Gco9iEvZ+V/309Na4H
github.com/aws/amazon-vpc-cni-k8s v1.15.5/go.mod h1:jV4wNtmgT2Ra1/oZU99DPOFsCUKnf0mYfIyzDyAUVAY=
github.com/aws/aws-lambda-go v1.41.0 h1:l/5fyVb6Ud9uYd411xdHZzSf2n86TakxzpvIoz7l+3Y=
github.com/aws/aws-lambda-go v1.41.0/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM=
-github.com/aws/aws-sdk-go v1.55.7 h1:UJrkFq7es5CShfBwlWAC8DA077vp8PyVbQd3lqLiztE=
-github.com/aws/aws-sdk-go v1.55.7/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go-v2 v1.38.0 h1:UCRQ5mlqcFk9HJDIqENSLR3wiG1VTWlyUfLDEvY7RxU=
github.com/aws/aws-sdk-go-v2 v1.38.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg=
@@ -146,12 +148,20 @@ github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtM
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
-github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
+github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
+github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
+github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
+github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
-github.com/coredns/corefile-migration v1.0.26 h1:xiiEkVB1Dwolb24pkeDUDBfygV9/XsOSq79yFCrhptY=
-github.com/coredns/corefile-migration v1.0.26/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
-github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb h1:rmqyI19j3Z/74bIRhuC59RB442rXUazKNueVpfJPxg4=
-github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb/go.mod h1:rcFZM3uxVvdyNmsAV2jopgPD1cs5SPWJWU5dOz2LUnw=
-github.com/coreos/go-semver v0.3.1 h1:yi+e+pHVCSEQ8u+X0A?=
-github.com/coreos/ignition v0.35.0 h1:UFodoYq1mOPrbEjtxIsZbThcDyQwAI1owczRDqWmKkQ=
-github.com/coreos/ignition/v2 v2.16.2 h1:wPpxTovdzCLJISYmNiM5Cpw4qCPc3/P2ibruPyS0UCY=
-github.com/coreos/ignition/v2 v2.16.2/go.mod h1:Y1BKC60VSNgA5oWNoLIHXigpFX1FFn4CVeimmsI+Bhg=
-github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 h1:uSmlDgJGbUB0bwQBcZomBTottKwEDF5fF8UjSwKSzWM=
-github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687/go.mod h1:Salmysdw7DAVuobBW/LwsKKgpyCPHUhjyJoMJD+ZJiI=
+github.com/coredns/corefile-migration v1.0.27 h1:WIIw5sU0LfGgoGnhdrYdVcto/aWmJoGA/C62iwkU0JM=
+github.com/coredns/corefile-migration v1.0.27/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@@ -184,8 +198,8 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v28.0.2+incompatible h1:9BILleFwug5FSSqWBgVevgL3ewDJfWWWyZVqlDMttE8=
-github.com/docker/docker v28.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI=
+github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -221,8 +235,8 @@ github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JS
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
-github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
-github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
@@ -245,7 +259,6 @@ github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4
github.com/gobuffalo/flect v1.0.3/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0=
github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
-github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
@@ -301,11 +314,16 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f h1:5CjVwnuUcp5adK4gmY6i72gpVFVnZDP2h5TmPScB6u4=
github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
+github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/css v1.0.1 h1:ntNaBIghp6JmvWnxbZKANoLyuXTPZ4cAMlo6RyhlbO8=
github.com/gorilla/css v1.0.1/go.mod h1:BvnYkspnSzMmwRK+b8/xgNPLiIuNZr6vbZBTPQ2A3b0=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
-github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI=
github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c h1:6rhixN/i8ZofjG1Y75iExal34USq5p+wiN1tpie8IrU=
@@ -458,6 +474,10 @@ github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3N
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=
+github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=
+github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=
+github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=
+github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -484,13 +504,13 @@ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
-github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0=
-github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM=
+github.com/onsi/ginkgo/v2 v2.23.4 h1:ktYTpKJAVZnDT4VjxSbiBenUjmlL/5QkBEocaWXiQus=
+github.com/onsi/ginkgo/v2 v2.23.4/go.mod h1:Bt66ApGPBFzHyR+JO10Zbt0Gsp4uWxu5mIOTusL46e8=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.12.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
-github.com/onsi/gomega v1.36.3 h1:hID7cr8t3Wp26+cYnfcjR6HpJ00fdogN6dqZ1t6IylU=
-github.com/onsi/gomega v1.36.3/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0=
+github.com/onsi/gomega v1.38.0 h1:c/WX+w8SLAinvuKKQFh77WEucCnPk4j2OTUr7lt7BeY=
+github.com/onsi/gomega v1.38.0/go.mod h1:OcXcwId0b9QsE7Y49u+BTrL4IdKOBOKnD6VQNTJEB6o=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc5 h1:Ygwkfw9bpDvs+c9E34SdgGOj41dX/cbdlwvlWt0pnFI=
@@ -512,7 +532,9 @@ github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xl
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
-github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -526,6 +548,10 @@ github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGI
-github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
-github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= 
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -582,10 +608,11 @@ github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= -github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.20.0 h1:zrxIyR3RQIOsarIrgL8+sAvALXul9jeEPa06Y0Ph6vY= -github.com/spf13/viper v1.20.0/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4= +github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4= github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs= github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -612,10 +639,6 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= -github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= -github.com/valyala/fastjson v1.6.4/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY= -github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= -github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= @@ -632,20 +655,12 @@ github.com/zgalor/weberr v0.8.2/go.mod h1:cqK89mj84q3PRgqQXQFWJDzCorOd8xOtov/ulO github.com/ziutek/telnet v0.0.0-20180329124119-c3b780dc415b/go.mod h1:IZpXDfkJ6tWD3PhBK5YzgQT+xJWh7OsdwiG8hA2MkO4= gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a h1:DxppxFKRqJ8WD6oJ3+ZXKDY0iMONQDl5UTg2aTyHh8k= gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a/go.mod h1:NREvu3a57BaK0R1+ztrEzHWiZAihohNLQ6trPxlIqZI= -go.etcd.io/etcd/api/v3 v3.5.21 h1:A6O2/JDb3tvHhiIz3xf9nJ7REHvtEFJJ3veW3FbCnS8= -go.etcd.io/etcd/api/v3 v3.5.21/go.mod h1:c3aH5wcvXv/9dqIw2Y810LDXJfhSYdHQ0vxmP3CCHVY= -go.etcd.io/etcd/client/pkg/v3 v3.5.21 h1:lPBu71Y7osQmzlflM9OfeIV2JlmpBjqBNlLtcoBqUTc= -go.etcd.io/etcd/client/pkg/v3 v3.5.21/go.mod h1:BgqT/IXPjK9NkeSDjbzwsHySX3yIle2+ndz28nVsjUs= -go.etcd.io/etcd/client/v3 v3.5.21 h1:T6b1Ow6fNjOLOtM0xSoKNQt1ASPCLWrF9XMHcH9pEyY= -go.etcd.io/etcd/client/v3 v3.5.21/go.mod h1:mFYy67IOqmbRf/kRUvsHixzo3iG+1OF2W2+jVIQRAnU= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod 
h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q= -go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= -go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= +go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= @@ -653,6 +668,7 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q= go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY= <<<<<<< HEAD +<<<<<<< HEAD go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc= go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8= go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo= @@ -675,6 +691,20 @@ go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37Cb go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= >>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4) +======= +go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= +go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= +go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= +go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU= +go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk= +go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w= +go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= +go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= +>>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0) go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod 
h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko= @@ -690,6 +720,10 @@ go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -702,8 +736,13 @@ golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +<<<<<<< HEAD golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +======= +golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM= +golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY= +>>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0) golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8= golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY= @@ -730,8 +769,13 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +<<<<<<< HEAD golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +======= +golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs= +golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8= +>>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0) golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= @@ -772,6 +816,7 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +<<<<<<< HEAD golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -779,12 +824,20 @@ golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9sn golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +======= +golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA= +golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg= +golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0= +>>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0) golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= <<<<<<< HEAD +<<<<<<< HEAD golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= @@ -792,6 +845,10 @@ golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= ======= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +======= +golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4= +golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU= +>>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0) golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= >>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4) @@ -811,10 +868,15 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +<<<<<<< HEAD golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +======= +golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo= +golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg= +>>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0) golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -825,16 +887,16 @@ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9Ywl google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24= +google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0= -google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw= +google.golang.org/grpc v1.71.3 h1:iEhneYTxOruJyZAxdAv8Y0iRZvsc5M6KoW7UA0/7jn0= +google.golang.org/grpc v1.71.3/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -843,8 +905,13 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +<<<<<<< HEAD google.golang.org/protobuf 
v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= +======= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +>>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0) gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -901,9 +968,14 @@ k8s.io/cli-runtime v0.33.4 h1:V8NSxGfh24XzZVhXmIGzsApdBpGq0RQS2u/Fz1GvJwk= k8s.io/cli-runtime v0.33.4/go.mod h1:V+ilyokfqjT5OI+XE+O515K7jihtr0/uncwoyVqXaIU= k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw= k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY= +<<<<<<< HEAD >>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4) k8s.io/cluster-bootstrap v0.32.3 h1:AqIpsUhB6MUeaAsl1WvaUw54AHRd2hfZrESlKChtd8s= k8s.io/cluster-bootstrap v0.32.3/go.mod h1:CHbBwgOb6liDV6JFUTkx5t85T2xidy0sChBDoyYw344= +======= +k8s.io/cluster-bootstrap v0.33.3 h1:u2NTxJ5CFSBFXaDxLQoOWMly8eni31psVso+caq6uwI= +k8s.io/cluster-bootstrap v0.33.3/go.mod h1:p970f8u8jf273zyQ5raD8WUu2XyAl0SAWOY82o7i/ds= +>>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0) k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY= k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= @@ -912,28 +984,27 @@ k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUy k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/kubectl v0.33.4 h1:nXEI6Vi+oB9hXxoAHyHisXolm/l1qutK3oZQMak4N98= k8s.io/kubectl v0.33.4/go.mod h1:Xe7P9X4DfILvKmlBsVqUtzktkI56lEj22SJW7cFy6nE= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= -k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= sigs.k8s.io/aws-iam-authenticator v0.6.13 h1:QSQcAkpt/hF97Ogyoz6sj3WD2twTd2cmxFb4e6Rs9gA= sigs.k8s.io/aws-iam-authenticator v0.6.13/go.mod h1:CnvFyzR/xeLHmUY/BD0qW6q0wp6KIwXmFp4eTfrHdP8= -sigs.k8s.io/cluster-api v1.10.2 h1:xfvtNu4Fy/41grL0ryH5xSKQjpJEWdO8HiV2lPCCozQ= -sigs.k8s.io/cluster-api v1.10.2/go.mod h1:/b9Un5Imprib6S7ZOcJitC2ep/5wN72b0pXpMQFfbTw= -sigs.k8s.io/cluster-api/test v1.10.2 h1:y6vSdS9FSAi/DNoFE2fZo2fed0m1cgW+ueBazk1g4i8= -sigs.k8s.io/cluster-api/test v1.10.2/go.mod h1:KLeRjNtQS8k5jIPvQF0QxOti/ATu5euwSusb6iFBga8= -sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= -sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= 
+sigs.k8s.io/cluster-api v1.11.1 h1:7CyGCTxv1p3Y2kRe1ljTj/w4TcdIdWNj0CTBc4i1aBo=
+sigs.k8s.io/cluster-api v1.11.1/go.mod h1:zyrjgJ5RbXhwKcAdUlGPNK5YOHpcmxXvur+5I8lkMUQ=
+sigs.k8s.io/cluster-api/test v1.11.1 h1:p9tT2HupKHW1URQDsZ3QNdEC/YPc8nrkiV6RCtNgi5k=
+sigs.k8s.io/cluster-api/test v1.11.1/go.mod h1:COviHWIKTcip0VADeIh8Rm5bjqzyZ1LuzKBW1EqjJRc=
+sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8=
+sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
-sigs.k8s.io/kind v0.27.0 h1:PQ3f0iAWNIj66LYkZ1ivhEg/+Zb6UPMbO+qVei/INZA=
-sigs.k8s.io/kind v0.27.0/go.mod h1:RZVFmy6qcwlSWwp6xeIUv7kXCPF3i8MXsEXxW/J+gJY=
+sigs.k8s.io/kind v0.30.0 h1:2Xi1KFEfSMm0XDcvKnUt15ZfgRPCT0OnCBbpgh8DztY=
+sigs.k8s.io/kind v0.30.0/go.mod h1:FSqriGaoTPruiXWfRnUXNykF8r2t+fHtK0P0m1AbGF8=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
-sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
+sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
+sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
diff --git a/test/e2e/data/e2e_conf.yaml b/test/e2e/data/e2e_conf.yaml
index 4374126a9b..84da3bf359 100644
--- a/test/e2e/data/e2e_conf.yaml
+++ b/test/e2e/data/e2e_conf.yaml
@@ -26,11 +26,11 @@ images:
loadBehavior: tryLoad
- name: quay.io/jetstack/cert-manager-controller:v1.17.2
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.10.2
+ - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.11.1
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.10.2
+ - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.11.1
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.10.2
+ - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.11.1
loadBehavior: tryLoad

providers:
@@ -48,8 +48,8 @@ providers:
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.10.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/core-components.yaml"
+ - name: v1.11.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/core-components.yaml"
type: "url"
contract: v1beta1
files:
@@ -75,8 +75,8 @@ providers:
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.10.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/bootstrap-components.yaml"
+ - name: v1.11.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> main clusterctl upgrades test only.
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/bootstrap-components.yaml"
type: "url"
contract: v1beta1
files:
@@ -102,8 +102,8 @@ providers:
new: "imagePullPolicy: IfNotPresent"
- old: --metrics-bind-addr=127.0.0.1:8080
new: --metrics-bind-addr=:8080
- - name: v1.10.2 # latest published release in the v1beta1 series; this is used for v1beta1 --> v1beta1 latest clusterctl upgrades test only.
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/control-plane-components.yaml"
+ - name: v1.11.1 # latest published release in the v1beta1 series; this is used for v1beta1 --> v1beta1 latest clusterctl upgrades test only.
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/control-plane-components.yaml"
type: "url"
contract: v1beta1
files:
diff --git a/test/e2e/data/e2e_eks_conf.yaml b/test/e2e/data/e2e_eks_conf.yaml
index 8271d1b529..28740e1f98 100644
--- a/test/e2e/data/e2e_eks_conf.yaml
+++ b/test/e2e/data/e2e_eks_conf.yaml
@@ -23,19 +23,19 @@ images:
loadBehavior: tryLoad
- name: quay.io/jetstack/cert-manager-controller:v1.17.2
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.10.2
+ - name: registry.k8s.io/cluster-api/cluster-api-controller:v1.11.1
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.10.2
+ - name: registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.11.1
loadBehavior: tryLoad
- - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.10.2
+ - name: registry.k8s.io/cluster-api/kubeadm-control-plane-controller:v1.11.1
loadBehavior: tryLoad

providers:
- name: cluster-api
type: CoreProvider
versions:
- - name: v1.10.2
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/core-components.yaml"
+ - name: v1.11.1
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/core-components.yaml"
type: "url"
contract: v1beta1
files:
@@ -50,8 +50,8 @@ providers:
files:
- sourcePath: "./shared/v1beta1/metadata.yaml"
versions:
- - name: v1.10.2
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/bootstrap-components.yaml"
+ - name: v1.11.1
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/bootstrap-components.yaml"
type: "url"
contract: v1beta1
files:
@@ -66,8 +66,8 @@ providers:
files:
- sourcePath: "./shared/v1beta1/metadata.yaml"
versions:
- - name: v1.10.2
- value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.10.2/control-plane-components.yaml"
+ - name: v1.11.1
+ value: "https://github.com/kubernetes-sigs/cluster-api/releases/download/v1.11.1/control-plane-components.yaml"
type: "url"
contract: v1beta1
files:
diff --git a/versions.mk b/versions.mk
index 967152ce15..0fbd4751d0 100644
--- a/versions.mk
+++
b/versions.mk @@ -15,7 +15,7 @@ MDBOOK_VERSION := v0.4.5 PLANTUML_VERSION := 1.2020.16 CERT_MANAGER_VERSION := v1.17.2 -CAPI_VERSION := v1.10.2 +CAPI_VERSION := v1.11.1 KPROMO_VERSION := v4.0.5 YQ_VERSION := v4.25.2 GOLANGCI_LINT_VERSION := v2.1.0 From bfa570ecaffa55dd278a875aaf40f9d91d467513 Mon Sep 17 00:00:00 2001 From: Bryan Cox Date: Fri, 5 Sep 2025 12:28:15 -0400 Subject: [PATCH 3/9] WIP no IDE errors --- .golangci.yml | 4 +-- api/v1beta1/awscluster_types.go | 2 +- api/v1beta1/awsclustertemplate_types.go | 3 +-- api/v1beta1/awsmachine_types.go | 3 +-- api/v1beta1/awsmachinetemplate_types.go | 3 +-- api/v1beta1/conditions_consts.go | 3 +-- api/v1beta1/tags.go | 3 +-- api/v1beta1/types.go | 3 +-- api/v1beta1/zz_generated.conversion.go | 2 +- api/v1beta1/zz_generated.deepcopy.go | 2 +- api/v1beta2/awscluster_types.go | 3 +-- api/v1beta2/awscluster_webhook.go | 2 +- api/v1beta2/awscluster_webhook_test.go | 2 +- api/v1beta2/awsclustertemplate_types.go | 3 +-- api/v1beta2/awsmachine_types.go | 3 +-- api/v1beta2/awsmachinetemplate_types.go | 3 +-- api/v1beta2/awsmanagedcluster_types.go | 3 +-- api/v1beta2/conditions_consts.go | 3 +-- api/v1beta2/tags.go | 3 +-- api/v1beta2/types.go | 3 +-- api/v1beta2/zz_generated.deepcopy.go | 2 +- bootstrap/eks/api/v1beta1/condition_consts.go | 3 +-- bootstrap/eks/api/v1beta1/eksconfig_types.go | 3 +-- .../api/v1beta1/zz_generated.conversion.go | 2 +- .../eks/api/v1beta1/zz_generated.deepcopy.go | 2 +- bootstrap/eks/api/v1beta2/condition_consts.go | 3 +-- bootstrap/eks/api/v1beta2/eksconfig_types.go | 3 +-- .../eks/api/v1beta2/zz_generated.deepcopy.go | 2 +- .../controllers/eksconfig_controller_test.go | 2 +- cmd/clusterawsadm/gc/gc.go | 2 +- cmd/clusterawsadm/gc/gc_test.go | 2 +- controllers/awscluster_controller_test.go | 2 +- .../awscluster_controller_unit_test.go | 2 +- controllers/suite_test.go | 2 +- .../v1beta1/awsmanagedcontrolplane_types.go | 3 +-- .../eks/api/v1beta1/conditions_consts.go | 3 +-- .../api/v1beta1/zz_generated.conversion.go | 2 +- .../eks/api/v1beta1/zz_generated.deepcopy.go | 2 +- .../v1beta2/awsmanagedcontrolplane_types.go | 3 +-- .../eks/api/v1beta2/conditions_consts.go | 3 +-- .../eks/api/v1beta2/zz_generated.deepcopy.go | 2 +- .../awsmanagedcontrolplane_controller_test.go | 2 +- controlplane/eks/controllers/helpers_test.go | 3 +-- controlplane/eks/controllers/suite_test.go | 3 +-- .../rosa/api/v1beta2/conditions_consts.go | 3 +-- controlplane/rosa/controllers/suite_test.go | 3 +-- exp/api/v1beta1/awsfargateprofile_types.go | 3 +-- exp/api/v1beta1/awsmachinepool_types.go | 3 +-- .../v1beta1/awsmanagedmachinepool_types.go | 3 +-- exp/api/v1beta1/conditions_consts.go | 3 +-- exp/api/v1beta1/zz_generated.conversion.go | 2 +- exp/api/v1beta1/zz_generated.deepcopy.go | 2 +- exp/api/v1beta2/awsfargateprofile_types.go | 3 +-- exp/api/v1beta2/awsfargateprofile_webhook.go | 3 +-- .../v1beta2/awsfargateprofile_webhook_test.go | 3 +-- exp/api/v1beta2/awsmachinepool_types.go | 3 +-- .../v1beta2/awsmanagedmachinepool_types.go | 3 +-- exp/api/v1beta2/conditions_consts.go | 3 +-- exp/api/v1beta2/rosacluster_types.go | 3 +-- exp/controlleridentitycreator/suite_test.go | 3 +-- exp/controllers/awsmachinepool_machines.go | 9 +++---- .../rosamachinepool_controller_test.go | 27 +++++++++---------- exp/controllers/suite_test.go | 4 +-- exp/instancestate/suite_test.go | 4 +-- main.go | 4 +-- pkg/cloud/interfaces.go | 2 +- pkg/cloud/scope/elb.go | 3 +-- pkg/cloud/scope/launchtemplate.go | 4 +-- pkg/cloud/scope/machine_test.go | 3 +-- 
pkg/cloud/scope/session_test.go | 3 +-- pkg/cloud/scope/shared.go | 2 +- .../autoscaling/autoscalinggroup_test.go | 7 +++-- pkg/cloud/services/ec2/bastion_test.go | 10 +++---- pkg/cloud/services/ec2/helper_test.go | 11 ++++---- pkg/cloud/services/ec2/instances_test.go | 18 ++++++------- pkg/cloud/services/ec2/launchtemplate_test.go | 2 +- pkg/cloud/services/eks/cluster_test.go | 2 +- pkg/cloud/services/eks/config.go | 2 +- pkg/cloud/services/eks/config_test.go | 2 +- pkg/cloud/services/eks/nodegroup.go | 2 +- pkg/cloud/services/eks/oidc_test.go | 3 +-- pkg/cloud/services/eks/roles.go | 3 +-- pkg/cloud/services/gc/cleanup_test.go | 2 +- pkg/cloud/services/iamauth/reconcile.go | 5 ++-- pkg/cloud/services/iamauth/reconcile_test.go | 9 +++---- pkg/cloud/services/iamauth/suite_test.go | 5 +--- .../services/instancestate/helpers_test.go | 2 +- .../services/network/carriergateways_test.go | 2 +- .../network/egress_only_gateways_test.go | 2 +- pkg/cloud/services/network/eips_test.go | 2 +- pkg/cloud/services/network/gateways_test.go | 2 +- .../services/network/natgateways_test.go | 2 +- .../services/network/routetables_test.go | 2 +- .../services/network/secondarycidr_test.go | 2 +- pkg/cloud/services/network/subnets_test.go | 2 +- pkg/cloud/services/network/vpc_test.go | 2 +- pkg/cloud/services/s3/s3_test.go | 2 +- .../services/secretsmanager/secret_test.go | 2 +- .../securitygroup/securitygroups_test.go | 2 +- pkg/cloud/services/ssm/secret_test.go | 2 +- pkg/utils/utils.go | 9 +++---- test/e2e/shared/common.go | 2 +- .../suites/managed/control_plane_helpers.go | 3 +-- test/e2e/suites/managed/machine_deployment.go | 3 +-- .../managed/machine_deployment_helpers.go | 3 +-- .../suites/managed/machine_pool_helpers.go | 8 +++--- test/e2e/suites/managed/managed_suite_test.go | 4 +-- test/e2e/suites/unmanaged/helpers_test.go | 2 +- .../unmanaged_classic_elb_upgrade_test.go | 3 +-- .../unmanaged/unmanaged_functional_test.go | 3 +-- test/helpers/envtest.go | 2 +- test/mocks/capa_clusterscoper_mock.go | 2 +- util/conditions/helper.go | 4 +-- util/paused/paused.go | 6 ++--- util/paused/paused_test.go | 8 +++--- 115 files changed, 166 insertions(+), 232 deletions(-) diff --git a/.golangci.yml b/.golangci.yml index d87ebbdf39..e48f55626d 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -169,8 +169,8 @@ linters: alias: crclient - pkg: k8s.io/apimachinery/pkg/types alias: apimachinerytypes - - pkg: sigs.k8s.io/cluster-api/exp/api/v1beta1 - alias: expclusterv1 + - pkg: "sigs.k8s.io/cluster-api/api/core/v1beta1" + alias: clusterv1 no-unaliased: false nolintlint: require-specific: true diff --git a/api/v1beta1/awscluster_types.go b/api/v1beta1/awscluster_types.go index ddb1d2cd5a..ea0fe220a0 100644 --- a/api/v1beta1/awscluster_types.go +++ b/api/v1beta1/awscluster_types.go @@ -19,7 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/api/v1beta1/awsclustertemplate_types.go b/api/v1beta1/awsclustertemplate_types.go index 07e2cf4039..7fa11075e7 100644 --- a/api/v1beta1/awsclustertemplate_types.go +++ b/api/v1beta1/awsclustertemplate_types.go @@ -19,8 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. 
type AWSClusterTemplateSpec struct { diff --git a/api/v1beta1/awsmachine_types.go b/api/v1beta1/awsmachine_types.go index 25a8cb4dcd..0a51b360cb 100644 --- a/api/v1beta1/awsmachine_types.go +++ b/api/v1beta1/awsmachine_types.go @@ -19,8 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") const ( // MachineFinalizer allows ReconcileAWSMachine to clean up AWS resources associated with AWSMachine before diff --git a/api/v1beta1/awsmachinetemplate_types.go b/api/v1beta1/awsmachinetemplate_types.go index 6e86295c6b..7ee1de3918 100644 --- a/api/v1beta1/awsmachinetemplate_types.go +++ b/api/v1beta1/awsmachinetemplate_types.go @@ -20,8 +20,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // AWSMachineTemplateStatus defines a status for an AWSMachineTemplate. type AWSMachineTemplateStatus struct { diff --git a/api/v1beta1/conditions_consts.go b/api/v1beta1/conditions_consts.go index ae5d761df1..e10c31ce72 100644 --- a/api/v1beta1/conditions_consts.go +++ b/api/v1beta1/conditions_consts.go @@ -16,8 +16,7 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully. // A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role. diff --git a/api/v1beta1/tags.go b/api/v1beta1/tags.go index a727d39cf4..877ef85d5c 100644 --- a/api/v1beta1/tags.go +++ b/api/v1beta1/tags.go @@ -24,8 +24,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // Tags defines a map of tags. type Tags map[string]string diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go index fe6510380b..53d94def4a 100644 --- a/api/v1beta1/types.go +++ b/api/v1beta1/types.go @@ -19,8 +19,7 @@ package v1beta1 import ( "k8s.io/apimachinery/pkg/util/sets" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // AWSResourceReference is a reference to a specific AWS resource by ID or filters. // Only one of ID or Filters may be specified. 
Specifying more than one will result in diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index 9c7a33e9fb..eb665466ec 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -29,7 +29,7 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index b3f9c154cf..9566fb53a4 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -23,7 +23,7 @@ package v1beta1 import ( "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/api/v1beta2/awscluster_types.go b/api/v1beta2/awscluster_types.go index 213ad99c56..fc0f1f91f2 100644 --- a/api/v1beta2/awscluster_types.go +++ b/api/v1beta2/awscluster_types.go @@ -19,8 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") const ( // ClusterFinalizer allows ReconcileAWSCluster to clean up AWS resources associated with AWSCluster before diff --git a/api/v1beta2/awscluster_webhook.go b/api/v1beta2/awscluster_webhook.go index ec4fac40af..525e3157c3 100644 --- a/api/v1beta2/awscluster_webhook.go +++ b/api/v1beta2/awscluster_webhook.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" ) diff --git a/api/v1beta2/awscluster_webhook_test.go b/api/v1beta2/awscluster_webhook_test.go index ad1b22d5fb..04f2e89cdd 100644 --- a/api/v1beta2/awscluster_webhook_test.go +++ b/api/v1beta2/awscluster_webhook_test.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/util/defaulting" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestAWSClusterDefault(t *testing.T) { diff --git a/api/v1beta2/awsclustertemplate_types.go b/api/v1beta2/awsclustertemplate_types.go index e0a827fa3d..9501cde7b4 100644 --- a/api/v1beta2/awsclustertemplate_types.go +++ b/api/v1beta2/awsclustertemplate_types.go @@ -19,8 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. 
type AWSClusterTemplateSpec struct {
diff --git a/api/v1beta2/awsmachine_types.go b/api/v1beta2/awsmachine_types.go
index 7031bdbaae..3c756c15e7 100644
--- a/api/v1beta2/awsmachine_types.go
+++ b/api/v1beta2/awsmachine_types.go
@@ -19,8 +19,7 @@ package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-)
+ clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1")
const (
// MachineFinalizer allows ReconcileAWSMachine to clean up AWS resources associated with AWSMachine before
diff --git a/api/v1beta2/awsmachinetemplate_types.go b/api/v1beta2/awsmachinetemplate_types.go
index 50d8dda22d..47a2b018b2 100644
--- a/api/v1beta2/awsmachinetemplate_types.go
+++ b/api/v1beta2/awsmachinetemplate_types.go
@@ -20,8 +20,7 @@ import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-)
+ clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1")
// AWSMachineTemplateStatus defines a status for an AWSMachineTemplate.
type AWSMachineTemplateStatus struct {
diff --git a/api/v1beta2/awsmanagedcluster_types.go b/api/v1beta2/awsmanagedcluster_types.go
index 67d9b2fc92..d9526ad86f 100644
--- a/api/v1beta2/awsmanagedcluster_types.go
+++ b/api/v1beta2/awsmanagedcluster_types.go
@@ -19,8 +19,7 @@ package v1beta2
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-)
+ clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1")
// AWSManagedClusterSpec defines the desired state of AWSManagedCluster
type AWSManagedClusterSpec struct {
diff --git a/api/v1beta2/conditions_consts.go b/api/v1beta2/conditions_consts.go
index 604ef8e1d5..11065009f8 100644
--- a/api/v1beta2/conditions_consts.go
+++ b/api/v1beta2/conditions_consts.go
@@ -16,8 +16,7 @@ limitations under the License.
package v1beta2
-import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-
+import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
const (
// PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully.
// A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role.
diff --git a/api/v1beta2/tags.go b/api/v1beta2/tags.go
index 45bc371a49..ad04f2f3fb 100644
--- a/api/v1beta2/tags.go
+++ b/api/v1beta2/tags.go
@@ -23,8 +23,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/validation/field"
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-)
+ clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1")
// Tags defines a map of tags.
type Tags map[string]string
diff --git a/api/v1beta2/types.go b/api/v1beta2/types.go
index c268165c10..f6007356dd 100644
--- a/api/v1beta2/types.go
+++ b/api/v1beta2/types.go
@@ -21,8 +21,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
- clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-)
+ clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1")
const (
// PreventDeletionLabel can be used in situations where preventing deletion is allowed.
The docs diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index 197cffba66..e74c4b29eb 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/bootstrap/eks/api/v1beta1/condition_consts.go b/bootstrap/eks/api/v1beta1/condition_consts.go index 86ef328727..c91412e9f9 100644 --- a/bootstrap/eks/api/v1beta1/condition_consts.go +++ b/bootstrap/eks/api/v1beta1/condition_consts.go @@ -16,8 +16,7 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // Conditions and condition Reasons for the EKSConfig object // FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1beta1/condition_consts.go diff --git a/bootstrap/eks/api/v1beta1/eksconfig_types.go b/bootstrap/eks/api/v1beta1/eksconfig_types.go index d268722878..ecd3a9167a 100644 --- a/bootstrap/eks/api/v1beta1/eksconfig_types.go +++ b/bootstrap/eks/api/v1beta1/eksconfig_types.go @@ -19,8 +19,7 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // EKSConfigSpec defines the desired state of Amazon EKS Bootstrap Configuration. type EKSConfigSpec struct { diff --git a/bootstrap/eks/api/v1beta1/zz_generated.conversion.go b/bootstrap/eks/api/v1beta1/zz_generated.conversion.go index eba4f6f7ce..aa2b37b195 100644 --- a/bootstrap/eks/api/v1beta1/zz_generated.conversion.go +++ b/bootstrap/eks/api/v1beta1/zz_generated.conversion.go @@ -27,7 +27,7 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { diff --git a/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go index 131707fac7..756cbbe029 100644 --- a/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/bootstrap/eks/api/v1beta2/condition_consts.go b/bootstrap/eks/api/v1beta2/condition_consts.go index e12213c840..0c4a81edff 100644 --- a/bootstrap/eks/api/v1beta2/condition_consts.go +++ b/bootstrap/eks/api/v1beta2/condition_consts.go @@ -16,8 +16,7 @@ limitations under the License. 
package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" // Conditions and condition Reasons for the EKSConfig object // FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1beta1/condition_consts.go diff --git a/bootstrap/eks/api/v1beta2/eksconfig_types.go b/bootstrap/eks/api/v1beta2/eksconfig_types.go index a2fce8e2cb..dfcb014aa8 100644 --- a/bootstrap/eks/api/v1beta2/eksconfig_types.go +++ b/bootstrap/eks/api/v1beta2/eksconfig_types.go @@ -19,8 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // EKSConfigSpec defines the desired state of Amazon EKS Bootstrap Configuration. type EKSConfigSpec struct { diff --git a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go index 7b059799a7..9bc33bef6f 100644 --- a/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta2/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1beta2 import ( "k8s.io/apimachinery/pkg/runtime" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/bootstrap/eks/controllers/eksconfig_controller_test.go b/bootstrap/eks/controllers/eksconfig_controller_test.go index bb82d14124..6f4a53c513 100644 --- a/bootstrap/eks/controllers/eksconfig_controller_test.go +++ b/bootstrap/eks/controllers/eksconfig_controller_test.go @@ -23,7 +23,7 @@ import ( . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" bsutil "sigs.k8s.io/cluster-api/bootstrap/util" ) diff --git a/cmd/clusterawsadm/gc/gc.go b/cmd/clusterawsadm/gc/gc.go index dac5a1f004..ab44eee895 100644 --- a/cmd/clusterawsadm/gc/gc.go +++ b/cmd/clusterawsadm/gc/gc.go @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util/patch" ) diff --git a/cmd/clusterawsadm/gc/gc_test.go b/cmd/clusterawsadm/gc/gc_test.go index 8e890579aa..4720029d64 100644 --- a/cmd/clusterawsadm/gc/gc_test.go +++ b/cmd/clusterawsadm/gc/gc_test.go @@ -31,7 +31,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" ) diff --git a/controllers/awscluster_controller_test.go b/controllers/awscluster_controller_test.go index 64dbd30c44..bc5804a679 100644 --- a/controllers/awscluster_controller_test.go +++ b/controllers/awscluster_controller_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" 
"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" ) diff --git a/controllers/awscluster_controller_unit_test.go b/controllers/awscluster_controller_unit_test.go index ee2d0bb9cf..c28544c677 100644 --- a/controllers/awscluster_controller_unit_test.go +++ b/controllers/awscluster_controller_unit_test.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" ) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 4ee71f9d07..8cd1a16b5d 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -31,7 +31,7 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" ) diff --git a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go index a965bef381..c892288bae 100644 --- a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go @@ -21,8 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") const ( // ManagedControlPlaneFinalizer allows the controller to clean up resources on delete. diff --git a/controlplane/eks/api/v1beta1/conditions_consts.go b/controlplane/eks/api/v1beta1/conditions_consts.go index 04b7452b19..930d2c92c9 100644 --- a/controlplane/eks/api/v1beta1/conditions_consts.go +++ b/controlplane/eks/api/v1beta1/conditions_consts.go @@ -16,8 +16,7 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane. 
EKSControlPlaneReadyCondition clusterv1.ConditionType = "EKSControlPlaneReady" diff --git a/controlplane/eks/api/v1beta1/zz_generated.conversion.go b/controlplane/eks/api/v1beta1/zz_generated.conversion.go index 95ae9313a6..07c12ce9c3 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.conversion.go +++ b/controlplane/eks/api/v1beta1/zz_generated.conversion.go @@ -29,7 +29,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { diff --git a/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go index f6db3b2da0..0324486959 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go +++ b/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go index be93930441..ee5e47c3d4 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go @@ -21,8 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") const ( // ManagedControlPlaneFinalizer allows the controller to clean up resources on delete. diff --git a/controlplane/eks/api/v1beta2/conditions_consts.go b/controlplane/eks/api/v1beta2/conditions_consts.go index fc8fa66721..b67a6280ef 100644 --- a/controlplane/eks/api/v1beta2/conditions_consts.go +++ b/controlplane/eks/api/v1beta2/conditions_consts.go @@ -16,8 +16,7 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane. EKSControlPlaneReadyCondition clusterv1.ConditionType = "EKSControlPlaneReady" diff --git a/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go index 678a641e9c..0e1b766d8b 100644 --- a/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go index 483992024d..2e24b8dc37 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go @@ -56,7 +56,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" ) diff --git a/controlplane/eks/controllers/helpers_test.go b/controlplane/eks/controllers/helpers_test.go index e79c2265b0..6b403ac6d9 100644 --- a/controlplane/eks/controllers/helpers_test.go +++ b/controlplane/eks/controllers/helpers_test.go @@ -25,8 +25,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") func getAWSManagedControlPlaneScope(cluster *clusterv1.Cluster, awsManagedControlPlane *ekscontrolplanev1.AWSManagedControlPlane) *scope.ManagedControlPlaneScope { scope, err := scope.NewManagedControlPlaneScope( diff --git a/controlplane/eks/controllers/suite_test.go b/controlplane/eks/controllers/suite_test.go index c284f3dec2..827081a258 100644 --- a/controlplane/eks/controllers/suite_test.go +++ b/controlplane/eks/controllers/suite_test.go @@ -29,8 +29,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") var ( testEnv *helpers.TestEnvironment diff --git a/controlplane/rosa/api/v1beta2/conditions_consts.go b/controlplane/rosa/api/v1beta2/conditions_consts.go index f094348440..6fd812198a 100644 --- a/controlplane/rosa/api/v1beta2/conditions_consts.go +++ b/controlplane/rosa/api/v1beta2/conditions_consts.go @@ -16,8 +16,7 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // ROSAControlPlaneReadyCondition condition reports on the successful reconciliation of ROSAControlPlane. 
ROSAControlPlaneReadyCondition clusterv1.ConditionType = "ROSAControlPlaneReady" diff --git a/controlplane/rosa/controllers/suite_test.go b/controlplane/rosa/controllers/suite_test.go index ebdfce2a76..83a5b0e232 100644 --- a/controlplane/rosa/controllers/suite_test.go +++ b/controlplane/rosa/controllers/suite_test.go @@ -32,8 +32,7 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") var ( testEnv *helpers.TestEnvironment diff --git a/exp/api/v1beta1/awsfargateprofile_types.go b/exp/api/v1beta1/awsfargateprofile_types.go index 155ab4915a..e9a76808ce 100644 --- a/exp/api/v1beta1/awsfargateprofile_types.go +++ b/exp/api/v1beta1/awsfargateprofile_types.go @@ -23,8 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") var ( // DefaultEKSFargateRole is the name of the default IAM role to use for fargate diff --git a/exp/api/v1beta1/awsmachinepool_types.go b/exp/api/v1beta1/awsmachinepool_types.go index fc70422c03..94433c9950 100644 --- a/exp/api/v1beta1/awsmachinepool_types.go +++ b/exp/api/v1beta1/awsmachinepool_types.go @@ -21,8 +21,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // Constants block. const ( diff --git a/exp/api/v1beta1/awsmanagedmachinepool_types.go b/exp/api/v1beta1/awsmanagedmachinepool_types.go index bd9632f95b..f48003648f 100644 --- a/exp/api/v1beta1/awsmanagedmachinepool_types.go +++ b/exp/api/v1beta1/awsmanagedmachinepool_types.go @@ -23,8 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool. type ManagedMachineAMIType string diff --git a/exp/api/v1beta1/conditions_consts.go b/exp/api/v1beta1/conditions_consts.go index 534ebb2bf9..9f667939fa 100644 --- a/exp/api/v1beta1/conditions_consts.go +++ b/exp/api/v1beta1/conditions_consts.go @@ -16,8 +16,7 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // ASGReadyCondition reports on current status of the autoscaling group. Ready indicates the group is provisioned. 
ASGReadyCondition clusterv1.ConditionType = "ASGReady" diff --git a/exp/api/v1beta1/zz_generated.conversion.go b/exp/api/v1beta1/zz_generated.conversion.go index 933a08f716..6b79c393af 100644 --- a/exp/api/v1beta1/zz_generated.conversion.go +++ b/exp/api/v1beta1/zz_generated.conversion.go @@ -28,7 +28,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { diff --git a/exp/api/v1beta1/zz_generated.deepcopy.go b/exp/api/v1beta1/zz_generated.deepcopy.go index da355ddf67..063e242516 100644 --- a/exp/api/v1beta1/zz_generated.deepcopy.go +++ b/exp/api/v1beta1/zz_generated.deepcopy.go @@ -23,7 +23,7 @@ package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/exp/api/v1beta2/awsfargateprofile_types.go b/exp/api/v1beta2/awsfargateprofile_types.go index 3869fd42fa..a2660a15b5 100644 --- a/exp/api/v1beta2/awsfargateprofile_types.go +++ b/exp/api/v1beta2/awsfargateprofile_types.go @@ -23,8 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") var ( // DefaultEKSFargateRole is the name of the default IAM role to use for fargate diff --git a/exp/api/v1beta2/awsfargateprofile_webhook.go b/exp/api/v1beta2/awsfargateprofile_webhook.go index ed38ff73ae..e9f11e052e 100644 --- a/exp/api/v1beta2/awsfargateprofile_webhook.go +++ b/exp/api/v1beta2/awsfargateprofile_webhook.go @@ -30,8 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") const ( maxProfileNameLength = 100 diff --git a/exp/api/v1beta2/awsfargateprofile_webhook_test.go b/exp/api/v1beta2/awsfargateprofile_webhook_test.go index 7849e0bb35..1d83f6f46a 100644 --- a/exp/api/v1beta2/awsfargateprofile_webhook_test.go +++ b/exp/api/v1beta2/awsfargateprofile_webhook_test.go @@ -27,8 +27,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks" utildefaulting "sigs.k8s.io/cluster-api-provider-aws/v2/util/defaulting" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") func TestAWSFargateProfileDefault(t *testing.T) { fargate := &AWSFargateProfile{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, diff --git a/exp/api/v1beta2/awsmachinepool_types.go b/exp/api/v1beta2/awsmachinepool_types.go index ef0a219513..aaacb0728b 100644 --- a/exp/api/v1beta2/awsmachinepool_types.go +++ b/exp/api/v1beta2/awsmachinepool_types.go @@ -23,8 +23,7 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // Constants block. 
const ( diff --git a/exp/api/v1beta2/awsmanagedmachinepool_types.go b/exp/api/v1beta2/awsmanagedmachinepool_types.go index 0aeb7be0dc..830493cbc4 100644 --- a/exp/api/v1beta2/awsmanagedmachinepool_types.go +++ b/exp/api/v1beta2/awsmanagedmachinepool_types.go @@ -23,8 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool. // Source of truth can be found using the link below: diff --git a/exp/api/v1beta2/conditions_consts.go b/exp/api/v1beta2/conditions_consts.go index a6a3f696f4..8b01761abd 100644 --- a/exp/api/v1beta2/conditions_consts.go +++ b/exp/api/v1beta2/conditions_consts.go @@ -16,8 +16,7 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" const ( // ASGReadyCondition reports on current status of the autoscaling group. Ready indicates the group is provisioned. ASGReadyCondition clusterv1.ConditionType = "ASGReady" diff --git a/exp/api/v1beta2/rosacluster_types.go b/exp/api/v1beta2/rosacluster_types.go index 3303125d1c..5332382192 100644 --- a/exp/api/v1beta2/rosacluster_types.go +++ b/exp/api/v1beta2/rosacluster_types.go @@ -19,8 +19,7 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // ROSAClusterSpec defines the desired state of ROSACluster. type ROSAClusterSpec struct { diff --git a/exp/controlleridentitycreator/suite_test.go b/exp/controlleridentitycreator/suite_test.go index 4cf1b0bb12..20bcc1a66c 100644 --- a/exp/controlleridentitycreator/suite_test.go +++ b/exp/controlleridentitycreator/suite_test.go @@ -30,8 +30,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // These tests use Ginkgo (BDD-style Go testing framework). Refer to // http://onsi.github.io/ginkgo/ to learn more about Ginkgo. 
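The exp/controllers hunks that follow are more than a path rename: MachinePool graduated out of the experimental API group in the vendored cluster-api, so the expclusterv1 import of sigs.k8s.io/cluster-api/exp/api/v1beta1 is deleted, *expclusterv1.MachinePool becomes *clusterv1.MachinePool, and the separate expclusterv1.AddToScheme calls are dropped because registering the core scheme now covers MachinePool as well. A sketch of a post-migration call site, modeled on the GetMachinePools change later in this series (the listMachinePools name is illustrative only):

package utils

import (
	"context"

	crclient "sigs.k8s.io/controller-runtime/pkg/client"

	// MachinePool and MachinePoolList now resolve from the core package;
	// the exp/api/v1beta1 import is gone entirely.
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
)

// listMachinePools returns the MachinePools labeled as belonging to a cluster.
func listMachinePools(ctx context.Context, c crclient.Client, namespace, clusterName string) ([]clusterv1.MachinePool, error) {
	machinePoolList := &clusterv1.MachinePoolList{}
	opts := []crclient.ListOption{
		crclient.InNamespace(namespace),
		crclient.MatchingLabels{clusterv1.ClusterNameLabel: clusterName},
	}
	if err := c.List(ctx, machinePoolList, opts...); err != nil {
		return nil, err
	}
	return machinePoolList.Items, nil
}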
diff --git a/exp/controllers/awsmachinepool_machines.go b/exp/controllers/awsmachinepool_machines.go index 24c633df05..0b27ee5a09 100644 --- a/exp/controllers/awsmachinepool_machines.go +++ b/exp/controllers/awsmachinepool_machines.go @@ -17,13 +17,12 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/labels/format" ) -func createAWSMachinesIfNotExists(ctx context.Context, awsMachineList *infrav1.AWSMachineList, mp *expclusterv1.MachinePool, infraMachinePoolMeta *metav1.ObjectMeta, infraMachinePoolType *metav1.TypeMeta, existingASG *expinfrav1.AutoScalingGroup, l logr.Logger, client client.Client, ec2Svc services.EC2Interface) error { +func createAWSMachinesIfNotExists(ctx context.Context, awsMachineList *infrav1.AWSMachineList, mp *clusterv1.MachinePool, infraMachinePoolMeta *metav1.ObjectMeta, infraMachinePoolType *metav1.TypeMeta, existingASG *expinfrav1.AutoScalingGroup, l logr.Logger, client client.Client, ec2Svc services.EC2Interface) error { if !feature.Gates.Enabled(feature.MachinePoolMachines) { return errors.New("createAWSMachinesIfNotExists must not be called unless the MachinePoolMachines feature gate is enabled") } @@ -161,7 +160,7 @@ func deleteOrphanedAWSMachines(ctx context.Context, awsMachineList *infrav1.AWSM return nil } -func getAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, kubeClient client.Client) (*infrav1.AWSMachineList, error) { +func getAWSMachines(ctx context.Context, mp *clusterv1.MachinePool, kubeClient client.Client) (*infrav1.AWSMachineList, error) { if !feature.Gates.Enabled(feature.MachinePoolMachines) { return nil, errors.New("getAWSMachines must not be called unless the MachinePoolMachines feature gate is enabled") } @@ -177,7 +176,7 @@ func getAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, kubeClien return awsMachineList, nil } -func reconcileDeleteAWSMachines(ctx context.Context, mp *expclusterv1.MachinePool, client client.Client, l logr.Logger) error { +func reconcileDeleteAWSMachines(ctx context.Context, mp *clusterv1.MachinePool, client client.Client, l logr.Logger) error { if !feature.Gates.Enabled(feature.MachinePoolMachines) { return errors.New("reconcileDeleteAWSMachines must not be called unless the MachinePoolMachines feature gate is enabled") } diff --git a/exp/controllers/rosamachinepool_controller_test.go b/exp/controllers/rosamachinepool_controller_test.go index f24bc0ac23..3ac08cb087 100644 --- a/exp/controllers/rosamachinepool_controller_test.go +++ b/exp/controllers/rosamachinepool_controller_test.go @@ -30,8 +30,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) @@ -73,7 +72,7 @@ func TestNodePoolToRosaMachinePoolSpec(t *testing.T) { CapacityReservationID: "capacity-reservation-id", } - machinePoolSpec := expclusterv1.MachinePoolSpec{ + machinePoolSpec := clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](2), } @@ -198,8 +197,8 @@ func 
TestRosaMachinePoolReconcile(t *testing.T) { } } - ownerMachinePool := func(i int) *expclusterv1.MachinePool { - return &expclusterv1.MachinePool{ + ownerMachinePool := func(i int) *clusterv1.MachinePool { + return &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("machinepool-%v", i), Namespace: ns.Name, @@ -210,7 +209,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Kind: "MachinePool", APIVersion: clusterv1.GroupVersion.String(), }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: fmt.Sprintf("owner-cluster-%v", i), Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ @@ -220,7 +219,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Name: rosaMachinePool(i).Name, Namespace: ns.Namespace, Kind: "ROSAMachinePool", - APIVersion: expclusterv1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, }, }, @@ -232,7 +231,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { name string newROSAMachinePool *expinfrav1.ROSAMachinePool oldROSAMachinePool *expinfrav1.ROSAMachinePool - machinePool *expclusterv1.MachinePool + machinePool *clusterv1.MachinePool expect func(m *mocks.MockOCMClientMockRecorder) result reconcile.Result }{ @@ -360,7 +359,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { }, { name: "Create nodepool, replicas are set in MachinePool", - machinePool: &expclusterv1.MachinePool{ + machinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: ownerMachinePool(3).Name, Namespace: ns.Name, @@ -371,7 +370,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Kind: "MachinePool", APIVersion: clusterv1.GroupVersion.String(), }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: ownerCluster(3).Name, Replicas: ptr.To[int32](2), Template: clusterv1.MachineTemplateSpec{ @@ -382,7 +381,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Name: rosaMachinePool(3).Name, Namespace: ns.Namespace, Kind: "ROSAMachinePool", - APIVersion: expclusterv1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, }, }, @@ -422,7 +421,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { }, { name: "Update nodepool, replicas are updated from MachinePool", - machinePool: &expclusterv1.MachinePool{ + machinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: ownerMachinePool(4).Name, Namespace: ns.Name, @@ -433,7 +432,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Kind: "MachinePool", APIVersion: clusterv1.GroupVersion.String(), }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: ownerCluster(4).Name, Replicas: ptr.To[int32](2), Template: clusterv1.MachineTemplateSpec{ @@ -444,7 +443,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Name: rosaMachinePool(4).Name, Namespace: ns.Namespace, Kind: "ROSAMachinePool", - APIVersion: expclusterv1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, }, }, diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go index 637cb6a19e..35f67b8f80 100644 --- a/exp/controllers/suite_test.go +++ b/exp/controllers/suite_test.go @@ -31,8 +31,7 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 
"sigs.k8s.io/cluster-api/api/core/v1beta1" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to @@ -53,7 +52,6 @@ func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(corev1.AddToScheme(scheme.Scheme)) utilruntime.Must(rosacontrolplanev1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ diff --git a/exp/instancestate/suite_test.go b/exp/instancestate/suite_test.go index 2e669f7bfd..28892be7ed 100644 --- a/exp/instancestate/suite_test.go +++ b/exp/instancestate/suite_test.go @@ -30,8 +30,7 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate/mock_sqsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to @@ -55,7 +54,6 @@ func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), }, diff --git a/main.go b/main.go index a0fcb0563b..c4b2a76496 100644 --- a/main.go +++ b/main.go @@ -65,8 +65,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/version" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/flags" ) @@ -80,7 +79,6 @@ func init() { _ = eksbootstrapv1beta1.AddToScheme(scheme) _ = cgscheme.AddToScheme(scheme) _ = clusterv1.AddToScheme(scheme) - _ = expclusterv1.AddToScheme(scheme) _ = ekscontrolplanev1.AddToScheme(scheme) _ = ekscontrolplanev1beta1.AddToScheme(scheme) _ = rosacontrolplanev1.AddToScheme(scheme) diff --git a/pkg/cloud/interfaces.go b/pkg/cloud/interfaces.go index 82ef9ad363..d4eaaf2994 100644 --- a/pkg/cloud/interfaces.go +++ b/pkg/cloud/interfaces.go @@ -27,7 +27,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/pkg/cloud/scope/elb.go b/pkg/cloud/scope/elb.go index 3d588f665b..4e3ce80c4f 100644 --- a/pkg/cloud/scope/elb.go +++ b/pkg/cloud/scope/elb.go @@ -19,8 +19,7 @@ package scope import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") // ELBScope is a scope for use with the ELB reconciling service. 
type ELBScope interface { diff --git a/pkg/cloud/scope/launchtemplate.go b/pkg/cloud/scope/launchtemplate.go index 34e84e7ff7..5551a33325 100644 --- a/pkg/cloud/scope/launchtemplate.go +++ b/pkg/cloud/scope/launchtemplate.go @@ -24,13 +24,13 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) // LaunchTemplateScope defines a scope defined around a launch template. type LaunchTemplateScope interface { - GetMachinePool() *expclusterv1.MachinePool + GetMachinePool() *clusterv1.MachinePool GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate LaunchTemplateName() string GetLaunchTemplateIDStatus() string diff --git a/pkg/cloud/scope/machine_test.go b/pkg/cloud/scope/machine_test.go index f34790d061..6c509cd782 100644 --- a/pkg/cloud/scope/machine_test.go +++ b/pkg/cloud/scope/machine_test.go @@ -28,8 +28,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") func setupScheme() (*runtime.Scheme, error) { scheme := runtime.NewScheme() diff --git a/pkg/cloud/scope/session_test.go b/pkg/cloud/scope/session_test.go index 1035ca6562..5665eac46b 100644 --- a/pkg/cloud/scope/session_test.go +++ b/pkg/cloud/scope/session_test.go @@ -34,8 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/identity" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/system" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") func TestIsClusterPermittedToUsePrincipal(t *testing.T) { testCases := []struct { diff --git a/pkg/cloud/scope/shared.go b/pkg/cloud/scope/shared.go index cde09c9dff..2a4fabb487 100644 --- a/pkg/cloud/scope/shared.go +++ b/pkg/cloud/scope/shared.go @@ -27,7 +27,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" ) diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup_test.go b/pkg/cloud/services/autoscaling/autoscalinggroup_test.go index da5689c32a..dbdc29a261 100644 --- a/pkg/cloud/services/autoscaling/autoscalinggroup_test.go +++ b/pkg/cloud/services/autoscaling/autoscalinggroup_test.go @@ -42,8 +42,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/autoscaling/mock_autoscalingiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestServiceGetASGByName(t *testing.T) { @@ -1270,7 +1269,7 @@ func getFakeClient() client.Client { scheme := runtime.NewScheme() _ = infrav1.AddToScheme(scheme) _ = expinfrav1.AddToScheme(scheme) - _ = expclusterv1.AddToScheme(scheme) + _ = clusterv1.AddToScheme(scheme) return fake.NewClientBuilder().WithScheme(scheme).Build() } @@ 
-1353,7 +1352,7 @@ func getMachinePoolScope(client client.Client, clusterScope *scope.ClusterScope) mps, err := scope.NewMachinePoolScope(scope.MachinePoolScopeParams{ Client: client, Cluster: clusterScope.Cluster, - MachinePool: &expclusterv1.MachinePool{}, + MachinePool: &clusterv1.MachinePool{}, InfraCluster: clusterScope, AWSMachinePool: awsMachinePool, }) diff --git a/pkg/cloud/services/ec2/bastion_test.go b/pkg/cloud/services/ec2/bastion_test.go index e48a540935..9bb5d304fb 100644 --- a/pkg/cloud/services/ec2/bastion_test.go +++ b/pkg/cloud/services/ec2/bastion_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestServiceDeleteBastion(t *testing.T) { @@ -450,10 +450,10 @@ func TestServiceReconcileBastion(t *testing.T) { ID: "vpcID", }, Subnets: infrav1.Subnets{ - { + infrav1.SubnetSpec{ ID: "subnet-1", }, - { + infrav1.SubnetSpec{ ID: "subnet-2", IsPublic: true, }, @@ -682,10 +682,10 @@ func TestServiceReconcileBastionUSGOV(t *testing.T) { ID: "vpcID", }, Subnets: infrav1.Subnets{ - { + infrav1.SubnetSpec{ ID: "subnet-1", }, - { + infrav1.SubnetSpec{ ID: "subnet-2", IsPublic: true, }, diff --git a/pkg/cloud/services/ec2/helper_test.go b/pkg/cloud/services/ec2/helper_test.go index bd40c2b7bb..550e9d7eb4 100644 --- a/pkg/cloud/services/ec2/helper_test.go +++ b/pkg/cloud/services/ec2/helper_test.go @@ -31,8 +31,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func setupClusterScope(cl client.Client) (*scope.ClusterScope, error) { @@ -164,8 +163,8 @@ func newAWSManagedControlPlane() *ekscontrolplanev1.AWSManagedControlPlane { } } -func newMachinePool() *v1beta1.MachinePool { - return &v1beta1.MachinePool{ +func newMachinePool() *clusterv1.MachinePool { + return &clusterv1.MachinePool{ TypeMeta: metav1.TypeMeta{ Kind: "MachinePool", APIVersion: "v1", @@ -173,7 +172,7 @@ func newMachinePool() *v1beta1.MachinePool { ObjectMeta: metav1.ObjectMeta{ Name: "mp", }, - Spec: v1beta1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Version: ptr.To[string]("v1.23.3"), @@ -206,7 +205,7 @@ func setupScheme() (*runtime.Scheme, error) { if err := ekscontrolplanev1.AddToScheme(scheme); err != nil { return nil, err } - if err := v1beta1.AddToScheme(scheme); err != nil { + if err := clusterv1.AddToScheme(scheme); err != nil { return nil, err } return scheme, nil diff --git a/pkg/cloud/services/ec2/instances_test.go b/pkg/cloud/services/ec2/instances_test.go index b6c7c69d23..e8c80df03e 100644 --- a/pkg/cloud/services/ec2/instances_test.go +++ b/pkg/cloud/services/ec2/instances_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestInstanceIfExists(t *testing.T) { @@ -1687,7 
+1687,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "matching-subnet", }}, }, @@ -1813,7 +1813,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "subnet-1", }}, }, @@ -1905,7 +1905,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "subnet-1", }}, }, @@ -2031,7 +2031,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "subnet-1", AvailabilityZone: "us-west-1b", }}, @@ -2126,7 +2126,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "private-subnet-1", AvailabilityZone: "us-east-1b", IsPublic: false, @@ -2211,7 +2211,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "public-subnet-1", IsPublic: true, }}, @@ -2340,7 +2340,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "public-subnet-1", IsPublic: true, }}, @@ -2611,7 +2611,7 @@ func TestCreateInstance(t *testing.T) { VPC: infrav1.VPCSpec{ ID: "vpc-id", }, - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: "private-subnet-1", IsPublic: false, }}, diff --git a/pkg/cloud/services/ec2/launchtemplate_test.go b/pkg/cloud/services/ec2/launchtemplate_test.go index fd4ff8c81a..fff8074586 100644 --- a/pkg/cloud/services/ec2/launchtemplate_test.go +++ b/pkg/cloud/services/ec2/launchtemplate_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/pkg/cloud/services/eks/cluster_test.go b/pkg/cloud/services/eks/cluster_test.go index 25d69aaaf8..91972c9fa4 100644 --- a/pkg/cloud/services/eks/cluster_test.go +++ b/pkg/cloud/services/eks/cluster_test.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestMakeEKSEncryptionConfigs(t *testing.T) { diff --git a/pkg/cloud/services/eks/config.go b/pkg/cloud/services/eks/config.go index 153f293682..ba11e4bcd7 100644 --- a/pkg/cloud/services/eks/config.go +++ b/pkg/cloud/services/eks/config.go @@ -35,7 +35,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" diff --git 
a/pkg/cloud/services/eks/config_test.go b/pkg/cloud/services/eks/config_test.go index d6f64bd071..f1d0d2d299 100644 --- a/pkg/cloud/services/eks/config_test.go +++ b/pkg/cloud/services/eks/config_test.go @@ -20,7 +20,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/secret" ) diff --git a/pkg/cloud/services/eks/nodegroup.go b/pkg/cloud/services/eks/nodegroup.go index eb1430ffe6..708ccaf6b1 100644 --- a/pkg/cloud/services/eks/nodegroup.go +++ b/pkg/cloud/services/eks/nodegroup.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" ) diff --git a/pkg/cloud/services/eks/oidc_test.go b/pkg/cloud/services/eks/oidc_test.go index 8a57b330e1..5f92151030 100644 --- a/pkg/cloud/services/eks/oidc_test.go +++ b/pkg/cloud/services/eks/oidc_test.go @@ -43,8 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/testcert" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") func TestOIDCReconcile(t *testing.T) { testCertThumbprint := getTestcertTumbprint(t) diff --git a/pkg/cloud/services/eks/roles.go b/pkg/cloud/services/eks/roles.go index 6d813ef416..0e32c36359 100644 --- a/pkg/cloud/services/eks/roles.go +++ b/pkg/cloud/services/eks/roles.go @@ -31,8 +31,7 @@ import ( eksiam "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/iam" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" -) + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") const ( maxIAMRoleNameLength = 64 diff --git a/pkg/cloud/services/gc/cleanup_test.go b/pkg/cloud/services/gc/cleanup_test.go index 363ed94364..0be1138c27 100644 --- a/pkg/cloud/services/gc/cleanup_test.go +++ b/pkg/cloud/services/gc/cleanup_test.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestReconcileDelete(t *testing.T) { diff --git a/pkg/cloud/services/iamauth/reconcile.go b/pkg/cloud/services/iamauth/reconcile.go index c3a6407940..0f47a98634 100644 --- a/pkg/cloud/services/iamauth/reconcile.go +++ b/pkg/cloud/services/iamauth/reconcile.go @@ -30,8 +30,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // 
ReconcileIAMAuthenticator is used to create the aws-iam-authenticator in a cluster. @@ -152,7 +151,7 @@ func (s *Service) getRolesForMachineDeployments(ctx context.Context, allRoles ma } func (s *Service) getRolesForMachinePools(ctx context.Context, allRoles map[string]struct{}) error { - machinePoolList := &expclusterv1.MachinePoolList{} + machinePoolList := &clusterv1.MachinePoolList{} selectors := []client.ListOption{ client.InNamespace(s.scope.Namespace()), client.MatchingLabels{ diff --git a/pkg/cloud/services/iamauth/reconcile_test.go b/pkg/cloud/services/iamauth/reconcile_test.go index 91b1d4b9a0..ae8ae86556 100644 --- a/pkg/cloud/services/iamauth/reconcile_test.go +++ b/pkg/cloud/services/iamauth/reconcile_test.go @@ -31,8 +31,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" ) @@ -146,8 +145,8 @@ func createAWSMachinePoolForClusterWithInstanceProfile(name, namespace, clusterN return awsMP } -func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *expclusterv1.MachinePool { - mp := &expclusterv1.MachinePool{ +func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *clusterv1.MachinePool { + mp := &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -155,7 +154,7 @@ func createMachinepoolForCluster(name, namespace, clusterName string, infrastruc clusterv1.ClusterNameLabel: clusterName, }, }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: clusterName, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ diff --git a/pkg/cloud/services/iamauth/suite_test.go b/pkg/cloud/services/iamauth/suite_test.go index d94ce1bfaf..eaf1dda70f 100644 --- a/pkg/cloud/services/iamauth/suite_test.go +++ b/pkg/cloud/services/iamauth/suite_test.go @@ -29,8 +29,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) var ( @@ -48,9 +47,7 @@ func setup() { utilruntime.Must(infrav1.AddToScheme(scheme.Scheme)) utilruntime.Must(ekscontrolplanev1.AddToScheme(scheme.Scheme)) utilruntime.Must(expinfrav1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) utilruntime.Must(clusterv1.AddToScheme(scheme.Scheme)) - utilruntime.Must(expclusterv1.AddToScheme(scheme.Scheme)) testEnvConfig := helpers.NewTestEnvironmentConfiguration([]string{ path.Join("config", "crd", "bases"), diff --git a/pkg/cloud/services/instancestate/helpers_test.go b/pkg/cloud/services/instancestate/helpers_test.go index 5e004e08f5..d2d10b05ca 100644 --- a/pkg/cloud/services/instancestate/helpers_test.go +++ b/pkg/cloud/services/instancestate/helpers_test.go @@ -23,7 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 
"sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func setupCluster(clusterName string) (*scope.ClusterScope, error) { diff --git a/pkg/cloud/services/network/carriergateways_test.go b/pkg/cloud/services/network/carriergateways_test.go index c23a873c02..a1bc0d8ac6 100644 --- a/pkg/cloud/services/network/carriergateways_test.go +++ b/pkg/cloud/services/network/carriergateways_test.go @@ -33,7 +33,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestReconcileCarrierGateway(t *testing.T) { diff --git a/pkg/cloud/services/network/egress_only_gateways_test.go b/pkg/cloud/services/network/egress_only_gateways_test.go index ff12058f12..56d65d9880 100644 --- a/pkg/cloud/services/network/egress_only_gateways_test.go +++ b/pkg/cloud/services/network/egress_only_gateways_test.go @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestReconcileEgressOnlyInternetGateways(t *testing.T) { diff --git a/pkg/cloud/services/network/eips_test.go b/pkg/cloud/services/network/eips_test.go index 53dbc23dd2..643f9bb177 100644 --- a/pkg/cloud/services/network/eips_test.go +++ b/pkg/cloud/services/network/eips_test.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestServiceReleaseAddresses(t *testing.T) { diff --git a/pkg/cloud/services/network/gateways_test.go b/pkg/cloud/services/network/gateways_test.go index 62d35e3b69..48c0b5b047 100644 --- a/pkg/cloud/services/network/gateways_test.go +++ b/pkg/cloud/services/network/gateways_test.go @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestReconcileInternetGateways(t *testing.T) { diff --git a/pkg/cloud/services/network/natgateways_test.go b/pkg/cloud/services/network/natgateways_test.go index 79277625b5..97d7ff8900 100644 --- a/pkg/cloud/services/network/natgateways_test.go +++ b/pkg/cloud/services/network/natgateways_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/pkg/cloud/services/network/routetables_test.go b/pkg/cloud/services/network/routetables_test.go index eb131b8217..6b81c91585 100644 --- a/pkg/cloud/services/network/routetables_test.go +++ b/pkg/cloud/services/network/routetables_test.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" 
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestReconcileRouteTables(t *testing.T) { diff --git a/pkg/cloud/services/network/secondarycidr_test.go b/pkg/cloud/services/network/secondarycidr_test.go index 3296072299..d2e9b46a35 100644 --- a/pkg/cloud/services/network/secondarycidr_test.go +++ b/pkg/cloud/services/network/secondarycidr_test.go @@ -35,7 +35,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func setupNewManagedControlPlaneScope(cl client.Client) (*scope.ManagedControlPlaneScope, error) { diff --git a/pkg/cloud/services/network/subnets_test.go b/pkg/cloud/services/network/subnets_test.go index f14c9b7deb..48238d2f77 100644 --- a/pkg/cloud/services/network/subnets_test.go +++ b/pkg/cloud/services/network/subnets_test.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/pkg/cloud/services/network/vpc_test.go b/pkg/cloud/services/network/vpc_test.go index 9c2f5f3a22..0c5f086962 100644 --- a/pkg/cloud/services/network/vpc_test.go +++ b/pkg/cloud/services/network/vpc_test.go @@ -34,7 +34,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func describeVpcAttributeTrue(_ context.Context, input *ec2.DescribeVpcAttributeInput, _ ...ec2.Options) (*ec2.DescribeVpcAttributeOutput, error) { diff --git a/pkg/cloud/services/s3/s3_test.go b/pkg/cloud/services/s3/s3_test.go index 378d3114d3..22e4475877 100644 --- a/pkg/cloud/services/s3/s3_test.go +++ b/pkg/cloud/services/s3/s3_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3/mock_s3iface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) const ( diff --git a/pkg/cloud/services/secretsmanager/secret_test.go b/pkg/cloud/services/secretsmanager/secret_test.go index 2f9b83dc40..c7d898a034 100644 --- a/pkg/cloud/services/secretsmanager/secret_test.go +++ b/pkg/cloud/services/secretsmanager/secret_test.go @@ -35,7 +35,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func TestServiceCreate(t *testing.T) { diff --git a/pkg/cloud/services/securitygroup/securitygroups_test.go b/pkg/cloud/services/securitygroup/securitygroups_test.go index 2fd1cc64db..ee20404063 100644 --- a/pkg/cloud/services/securitygroup/securitygroups_test.go +++ 
b/pkg/cloud/services/securitygroup/securitygroups_test.go @@ -41,7 +41,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) var ( diff --git a/pkg/cloud/services/ssm/secret_test.go b/pkg/cloud/services/ssm/secret_test.go index d140cc9ecb..abb06b6d49 100644 --- a/pkg/cloud/services/ssm/secret_test.go +++ b/pkg/cloud/services/ssm/secret_test.go @@ -38,7 +38,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) type mockAPIError struct { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index c056042fba..eba75fe50d 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -9,20 +9,19 @@ import ( "k8s.io/utils/ptr" crclient "sigs.k8s.io/controller-runtime/pkg/client" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // GetMachinePools belong to a cluster. -func GetMachinePools(ctx context.Context, client crclient.Client, clusterName string, clusterNS string) ([]expclusterv1.MachinePool, error) { - machinePoolList := expclusterv1.MachinePoolList{} +func GetMachinePools(ctx context.Context, client crclient.Client, clusterName string, clusterNS string) ([]clusterv1.MachinePool, error) { + machinePoolList := clusterv1.MachinePoolList{} listOptions := []crclient.ListOption{ crclient.InNamespace(clusterNS), crclient.MatchingLabels(map[string]string{clusterv1.ClusterNameLabel: clusterName}), } if err := client.List(ctx, &machinePoolList, listOptions...); err != nil { - return []expclusterv1.MachinePool{}, fmt.Errorf("failed to list machine pools for cluster %s: %v", clusterName, err) + return []clusterv1.MachinePool{}, fmt.Errorf("failed to list machine pools for cluster %s: %v", clusterName, err) } return machinePoolList.Items, nil diff --git a/test/e2e/shared/common.go b/test/e2e/shared/common.go index 53bce01ae4..56e65a7763 100644 --- a/test/e2e/shared/common.go +++ b/test/e2e/shared/common.go @@ -37,7 +37,7 @@ import ( crclient "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" diff --git a/test/e2e/suites/managed/control_plane_helpers.go b/test/e2e/suites/managed/control_plane_helpers.go index 0178236d32..b21bcf4c10 100644 --- a/test/e2e/suites/managed/control_plane_helpers.go +++ b/test/e2e/suites/managed/control_plane_helpers.go @@ -33,8 +33,7 @@ import ( crclient "sigs.k8s.io/controller-runtime/pkg/client" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/test/framework" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" clusterctl "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) diff --git a/test/e2e/suites/managed/machine_deployment.go 
b/test/e2e/suites/managed/machine_deployment.go index 4ef19a0f8d..5fef37e5d2 100644 --- a/test/e2e/suites/managed/machine_deployment.go +++ b/test/e2e/suites/managed/machine_deployment.go @@ -30,8 +30,7 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/test/framework" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) diff --git a/test/e2e/suites/managed/machine_deployment_helpers.go b/test/e2e/suites/managed/machine_deployment_helpers.go index e156b4ac51..5513809e7b 100644 --- a/test/e2e/suites/managed/machine_deployment_helpers.go +++ b/test/e2e/suites/managed/machine_deployment_helpers.go @@ -28,8 +28,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/test/framework" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" ) type deleteMachineDeploymentInput struct { diff --git a/test/e2e/suites/managed/machine_pool_helpers.go b/test/e2e/suites/managed/machine_pool_helpers.go index b34eb7b1b8..9c8f6c7ddc 100644 --- a/test/e2e/suites/managed/machine_pool_helpers.go +++ b/test/e2e/suites/managed/machine_pool_helpers.go @@ -28,12 +28,12 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" ) type deleteMachinePoolInput struct { - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool Deleter framework.Deleter } @@ -43,14 +43,14 @@ func deleteMachinePool(ctx context.Context, input deleteMachinePoolInput) { } type waitForMachinePoolDeletedInput struct { - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool Getter framework.Getter } func waitForMachinePoolDeleted(ctx context.Context, input waitForMachinePoolDeletedInput, intervals ...interface{}) { By(fmt.Sprintf("Waiting for machine pool %s to be deleted", input.MachinePool.GetName())) Eventually(func() bool { - mp := &expclusterv1.MachinePool{} + mp := &clusterv1.MachinePool{} key := client.ObjectKey{ Namespace: input.MachinePool.GetNamespace(), Name: input.MachinePool.GetName(), diff --git a/test/e2e/suites/managed/managed_suite_test.go b/test/e2e/suites/managed/managed_suite_test.go index 15fc0d0b81..f8f4ec596d 100644 --- a/test/e2e/suites/managed/managed_suite_test.go +++ b/test/e2e/suites/managed/managed_suite_test.go @@ -32,8 +32,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) var ( @@ -90,7 +89,6 @@ func initScheme() *runtime.Scheme { _ = expinfrav1.AddToScheme(sc) _ = clusterv1.AddToScheme(sc) _ = ekscontrolplanev1.AddToScheme(sc) - _ = expclusterv1.AddToScheme(sc) return sc } diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go index d8c626f079..2918a3d8fe 100644 --- a/test/e2e/suites/unmanaged/helpers_test.go +++
b/test/e2e/suites/unmanaged/helpers_test.go @@ -51,7 +51,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" diff --git a/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go b/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go index 1ef8cf8950..dc1128e47f 100644 --- a/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go @@ -40,8 +40,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/test/framework" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go index f4d6d42e94..17a9c75652 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go @@ -41,8 +41,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/exp/instancestate" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/test/framework" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index 43f0618b0c..0663a9d768 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -51,7 +51,7 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" utilyaml "sigs.k8s.io/cluster-api/util/yaml" ) diff --git a/test/mocks/capa_clusterscoper_mock.go b/test/mocks/capa_clusterscoper_mock.go index e3664a61e0..06c078aa5d 100644 --- a/test/mocks/capa_clusterscoper_mock.go +++ b/test/mocks/capa_clusterscoper_mock.go @@ -33,7 +33,7 @@ import ( cloud "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" throttle "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" logger "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - v1beta1 "sigs.k8s.io/cluster-api/api/v1beta1" + v1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" client "sigs.k8s.io/controller-runtime/pkg/client" ) diff --git a/util/conditions/helper.go b/util/conditions/helper.go index 2acb09093e..9dfd97b919 100644 --- a/util/conditions/helper.go +++ b/util/conditions/helper.go @@ -18,8 +18,8 @@ limitations under the License. 
package conditions import ( - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // ErrorConditionAfterInit returns severity error, if the control plane is initialized; otherwise, returns severity warning. diff --git a/util/paused/paused.go b/util/paused/paused.go index 7750ded6d6..6cb56a50da 100644 --- a/util/paused/paused.go +++ b/util/paused/paused.go @@ -32,10 +32,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ConditionSetter combines the client.Object and Setter interface. diff --git a/util/paused/paused_test.go b/util/paused/paused_test.go index 6165263462..02f706a438 100644 --- a/util/paused/paused_test.go +++ b/util/paused/paused_test.go @@ -27,8 +27,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/test/builder" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/test/builder" ) func TestEnsurePausedCondition(t *testing.T) { @@ -51,7 +51,7 @@ func TestEnsurePausedCondition(t *testing.T) { pausedCluster.Spec.Paused = true // Object case 1: unpaused - obj := &builder.Phase1Obj{ObjectMeta: metav1.ObjectMeta{ + obj := &builder.Phase2Obj{ObjectMeta: metav1.ObjectMeta{ Name: "some-object", Namespace: "default", }} @@ -96,7 +96,7 @@ func TestEnsurePausedCondition(t *testing.T) { g := NewWithT(t) ctx := context.Background() - c := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&clusterv1.Cluster{}, &builder.Phase1Obj{}). + c := fake.NewClientBuilder().WithScheme(scheme).WithStatusSubresource(&clusterv1.Cluster{}, &builder.Phase2Obj{}). 
WithObjects(tt.object, tt.cluster).Build() g.Expect(c.Get(ctx, client.ObjectKeyFromObject(tt.object), tt.object)).To(Succeed()) From 619e66af2a082a4563e4311bc17906b2b4a16ba4 Mon Sep 17 00:00:00 2001 From: Bryan Cox Date: Fri, 5 Sep 2025 12:28:34 -0400 Subject: [PATCH 4/9] WIP IDE Errors --- .../eks/controllers/eksconfig_controller.go | 11 ++-- .../eksconfig_controller_reconciler_test.go | 3 +- controllers/awscluster_controller.go | 2 +- controllers/awsmachine_controller.go | 2 +- controllers/awsmachine_controller_test.go | 2 +- .../awsmachine_controller_unit_test.go | 2 +- controllers/awsmanagedcluster_controller.go | 2 +- controllers/helpers_test.go | 2 +- .../awsmanagedcontrolplane_controller.go | 2 +- .../rosacontrolplane_controller.go | 2 +- .../rosacontrolplane_controller_test.go | 2 +- exp/controllers/awsfargatepool_controller.go | 2 +- exp/controllers/awsmachinepool_controller.go | 15 +++-- .../awsmachinepool_controller_test.go | 59 +++++++++---------- .../awsmanagedmachinepool_controller.go | 7 +-- exp/controllers/rosamachinepool_controller.go | 9 ++- pkg/cloud/scope/cluster.go | 2 +- pkg/cloud/scope/fargate.go | 2 +- pkg/cloud/scope/machine.go | 2 +- pkg/cloud/scope/machinepool.go | 9 ++- pkg/cloud/scope/managedcontrolplane.go | 2 +- pkg/cloud/scope/managednodegroup.go | 9 ++- pkg/cloud/scope/rosacontrolplane.go | 2 +- pkg/cloud/scope/rosamachinepool.go | 7 +-- pkg/cloud/scope/session.go | 2 +- .../services/autoscaling/lifecyclehook.go | 2 +- pkg/cloud/services/ec2/bastion.go | 2 +- pkg/cloud/services/ec2/instances.go | 2 +- pkg/cloud/services/ec2/launchtemplate.go | 2 +- pkg/cloud/services/eks/cluster.go | 2 +- pkg/cloud/services/eks/eks.go | 2 +- pkg/cloud/services/eks/fargate.go | 2 +- pkg/cloud/services/elb/loadbalancer.go | 2 +- pkg/cloud/services/elb/loadbalancer_test.go | 14 ++--- pkg/cloud/services/network/natgateways.go | 2 +- pkg/cloud/services/network/network.go | 2 +- pkg/cloud/services/network/vpc.go | 4 +- .../services/securitygroup/securitygroups.go | 4 +- 38 files changed, 97 insertions(+), 106 deletions(-) diff --git a/bootstrap/eks/controllers/eksconfig_controller.go b/bootstrap/eks/controllers/eksconfig_controller.go index ca55199a6b..11351a0c72 100644 --- a/bootstrap/eks/controllers/eksconfig_controller.go +++ b/bootstrap/eks/controllers/eksconfig_controller.go @@ -41,13 +41,12 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" bsutil "sigs.k8s.io/cluster-api/bootstrap/util" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -301,7 +300,7 @@ func (r *EKSConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Man if feature.Gates.Enabled(feature.MachinePool) { b = b.Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(r.MachinePoolToBootstrapMapFunc), ) } @@ -382,7 +381,7 @@ func (r *EKSConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o cli func (r *EKSConfigReconciler) MachinePoolToBootstrapMapFunc(_ 
context.Context, o client.Object) []ctrl.Request { result := []ctrl.Request{} - m, ok := o.(*expclusterv1.MachinePool) + m, ok := o.(*clusterv1.MachinePool) if !ok { klog.Errorf("Expected a MachinePool but got a %T", o) } diff --git a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go index 163b94a338..3f5bfd1b10 100644 --- a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go +++ b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go @@ -30,8 +30,7 @@ import ( eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go index d0ffbbc462..23cc9e45ba 100644 --- a/controllers/awscluster_controller.go +++ b/controllers/awscluster_controller.go @@ -49,7 +49,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" capiannotations "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go index 445bab678c..079bbf9164 100644 --- a/controllers/awsmachine_controller.go +++ b/controllers/awsmachine_controller.go @@ -60,7 +60,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" diff --git a/controllers/awsmachine_controller_test.go b/controllers/awsmachine_controller_test.go index c2165e16ef..63abd127ef 100644 --- a/controllers/awsmachine_controller_test.go +++ b/controllers/awsmachine_controller_test.go @@ -43,7 +43,7 @@ import ( elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/controllers/awsmachine_controller_unit_test.go b/controllers/awsmachine_controller_unit_test.go index e5e9827bdd..1859ad7baf 100644 --- a/controllers/awsmachine_controller_unit_test.go +++ b/controllers/awsmachine_controller_unit_test.go @@ -52,7 +52,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" 
kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" "sigs.k8s.io/cluster-api/util" ) diff --git a/controllers/awsmanagedcluster_controller.go b/controllers/awsmanagedcluster_controller.go index 560191634b..9804db0b15 100644 --- a/controllers/awsmanagedcluster_controller.go +++ b/controllers/awsmanagedcluster_controller.go @@ -36,7 +36,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index 05f103cfb6..f754583667 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -36,7 +36,7 @@ import ( elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go index 1a3a3583d5..8605228781 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go @@ -52,7 +52,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller.go b/controlplane/rosa/controllers/rosacontrolplane_controller.go index bfdc650ff8..187365e7b6 100644 --- a/controlplane/rosa/controllers/rosacontrolplane_controller.go +++ b/controlplane/rosa/controllers/rosacontrolplane_controller.go @@ -65,7 +65,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/kubeconfig" diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller_test.go b/controlplane/rosa/controllers/rosacontrolplane_controller_test.go index 9e293d32ca..4ce5164e07 100644 --- a/controlplane/rosa/controllers/rosacontrolplane_controller_test.go +++ b/controlplane/rosa/controllers/rosacontrolplane_controller_test.go @@ -53,7 +53,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) diff --git a/exp/controllers/awsfargatepool_controller.go 
b/exp/controllers/awsfargatepool_controller.go index b4fbb0f99d..ad6c6c20a4 100644 --- a/exp/controllers/awsfargatepool_controller.go +++ b/exp/controllers/awsfargatepool_controller.go @@ -35,7 +35,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" diff --git a/exp/controllers/awsmachinepool_controller.go b/exp/controllers/awsmachinepool_controller.go index 8ac4b2b6eb..4dec26b89c 100644 --- a/exp/controllers/awsmachinepool_controller.go +++ b/exp/controllers/awsmachinepool_controller.go @@ -53,8 +53,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" @@ -229,7 +228,7 @@ func (r *AWSMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ctr WithOptions(options). For(&expinfrav1.AWSMachinePool{}). Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(expinfrav1.GroupVersion.WithKind("AWSMachinePool"))), ). WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(mgr.GetScheme(), logger.FromContext(ctx).GetLogger(), r.WatchFilterValue)). @@ -663,7 +662,7 @@ func diffASG(machinePoolScope *scope.MachinePoolScope, existingASG *expinfrav1.A } // getOwnerMachinePool returns the MachinePool object owning the current resource. -func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*expclusterv1.MachinePool, error) { +func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.ObjectMeta) (*clusterv1.MachinePool, error) { for _, ref := range obj.OwnerReferences { if ref.Kind != "MachinePool" { continue @@ -672,7 +671,7 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object if err != nil { return nil, errors.WithStack(err) } - if gv.Group == expclusterv1.GroupVersion.Group { + if gv.Group == clusterv1.GroupVersion.Group { return getMachinePoolByName(ctx, c, obj.Namespace, ref.Name) } } @@ -680,8 +679,8 @@ func getOwnerMachinePool(ctx context.Context, c client.Client, obj metav1.Object } // getMachinePoolByName finds and return a Machine object using the specified params. 
-func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*expclusterv1.MachinePool, error) { - m := &expclusterv1.MachinePool{} +func getMachinePoolByName(ctx context.Context, c client.Client, namespace, name string) (*clusterv1.MachinePool, error) { + m := &clusterv1.MachinePool{} key := client.ObjectKey{Name: name, Namespace: namespace} if err := c.Get(ctx, key, m); err != nil { return nil, err @@ -691,7 +690,7 @@ func machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.MapFunc { return func(ctx context.Context, o client.Object) []reconcile.Request { - m, ok := o.(*expclusterv1.MachinePool) + m, ok := o.(*clusterv1.MachinePool) if !ok { klog.Errorf("Expected a MachinePool but got a %T", o) } diff --git a/exp/controllers/awsmachinepool_controller_test.go b/exp/controllers/awsmachinepool_controller_test.go index 933f6c3187..694e3cd6dc 100644 --- a/exp/controllers/awsmachinepool_controller_test.go +++ b/exp/controllers/awsmachinepool_controller_test.go @@ -55,8 +55,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/labels/format" "sigs.k8s.io/cluster-api/util/patch" @@ -150,7 +149,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { InfrastructureReady: true, }, }, - MachinePool: &expclusterv1.MachinePool{ + MachinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: "mp", Namespace: "default", @@ -160,7 +159,7 @@ APIVersion: "cluster.x-k8s.io/v1beta1", Kind: "MachinePool", }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ ClusterName: "test", Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ @@ -1428,8 +1427,8 @@ func TestDiffASG(t *testing.T) { name: "replicas != asg.desiredCapacity", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, @@ -1444,8 +1443,8 @@ { name: "replicas (nil) != asg.desiredCapacity", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: nil, }, }, @@ -1460,8 +1459,8 @@ { name: "replicas != asg.desiredCapacity (nil)", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, @@ -1476,8 +1475,8 @@ { name: "maxSize != asg.maxSize", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1498,8 +1497,8 @@ func TestDiffASG(t *testing.T) 
{ name: "minSize != asg.minSize", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1522,8 +1521,8 @@ func TestDiffASG(t *testing.T) { name: "capacityRebalance != asg.capacityRebalance", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1548,8 +1547,8 @@ func TestDiffASG(t *testing.T) { name: "MixedInstancesPolicy != asg.MixedInstancesPolicy", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1582,8 +1581,8 @@ func TestDiffASG(t *testing.T) { name: "MixedInstancesPolicy.InstancesDistribution != asg.MixedInstancesPolicy.InstancesDistribution", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1635,8 +1634,8 @@ func TestDiffASG(t *testing.T) { name: "MixedInstancesPolicy.InstancesDistribution unset", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1682,8 +1681,8 @@ func TestDiffASG(t *testing.T) { name: "SuspendProcesses != asg.SuspendProcesses", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1723,8 +1722,8 @@ func TestDiffASG(t *testing.T) { name: "all matches", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](1), }, }, @@ -1761,13 +1760,13 @@ func TestDiffASG(t *testing.T) { name: "externally managed annotation ignores difference between desiredCapacity and replicas", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ + MachinePool: &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{ clusterv1.ReplicasManagedByAnnotation: "", // empty value counts as true (= externally managed) }, }, - Spec: expclusterv1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, @@ -1785,8 +1784,8 @@ func TestDiffASG(t *testing.T) { name: "without externally managed annotation ignores difference between desiredCapacity and replicas", args: args{ machinePoolScope: &scope.MachinePoolScope{ - MachinePool: &expclusterv1.MachinePool{ - Spec: expclusterv1.MachinePoolSpec{ + MachinePool: &clusterv1.MachinePool{ + Spec: clusterv1.MachinePoolSpec{ Replicas: ptr.To[int32](0), }, }, diff --git a/exp/controllers/awsmanagedmachinepool_controller.go b/exp/controllers/awsmanagedmachinepool_controller.go index 7368d08dcd..62bdc8b02f 100644 --- 
a/exp/controllers/awsmanagedmachinepool_controller.go +++ b/exp/controllers/awsmanagedmachinepool_controller.go @@ -44,8 +44,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" @@ -76,7 +75,7 @@ func (r *AWSManagedMachinePoolReconciler) SetupWithManager(ctx context.Context, WithOptions(options). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(gvk)), ). Watches( @@ -336,7 +335,7 @@ func managedControlPlaneToManagedMachinePoolMapFunc(c client.Client, gvk schema. return nil } - managedPoolForClusterList := expclusterv1.MachinePoolList{} + managedPoolForClusterList := clusterv1.MachinePoolList{} if err := c.List( ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}, ); err != nil { diff --git a/exp/controllers/rosamachinepool_controller.go b/exp/controllers/rosamachinepool_controller.go index f02110050b..00388ffc3d 100644 --- a/exp/controllers/rosamachinepool_controller.go +++ b/exp/controllers/rosamachinepool_controller.go @@ -37,8 +37,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" @@ -71,7 +70,7 @@ func (r *ROSAMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr ct WithOptions(options). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). Watches( - &expclusterv1.MachinePool{}, + &clusterv1.MachinePool{}, handler.EnqueueRequestsFromMapFunc(machinePoolToInfrastructureMapFunc(gvk)), ). Watches( @@ -459,7 +458,7 @@ func validateMachinePoolSpec(machinePoolScope *scope.RosaMachinePoolScope) (*str return nil, nil } -func nodePoolBuilder(rosaMachinePoolSpec expinfrav1.RosaMachinePoolSpec, machinePoolSpec expclusterv1.MachinePoolSpec, controlPlaneChannelGroup rosacontrolplanev1.ChannelGroupType) *cmv1.NodePoolBuilder { +func nodePoolBuilder(rosaMachinePoolSpec expinfrav1.RosaMachinePoolSpec, machinePoolSpec clusterv1.MachinePoolSpec, controlPlaneChannelGroup rosacontrolplanev1.ChannelGroupType) *cmv1.NodePoolBuilder { npBuilder := cmv1.NewNodePool().ID(rosaMachinePoolSpec.NodePoolName). Labels(rosaMachinePoolSpec.Labels). 
AutoRepair(rosaMachinePoolSpec.AutoRepair) @@ -606,7 +605,7 @@ func rosaControlPlaneToRosaMachinePoolMapFunc(c client.Client, gvk schema.GroupV return nil } - managedPoolForClusterList := expclusterv1.MachinePoolList{} + managedPoolForClusterList := clusterv1.MachinePoolList{} if err := c.List( ctx, &managedPoolForClusterList, client.InNamespace(clusterKey.Namespace), client.MatchingLabels{clusterv1.ClusterNameLabel: clusterKey.Name}, ); err != nil { diff --git a/pkg/cloud/scope/cluster.go b/pkg/cloud/scope/cluster.go index 730b977578..5fb6b04bce 100644 --- a/pkg/cloud/scope/cluster.go +++ b/pkg/cloud/scope/cluster.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) diff --git a/pkg/cloud/scope/fargate.go b/pkg/cloud/scope/fargate.go index 6e0fe0e1ef..73eec70484 100644 --- a/pkg/cloud/scope/fargate.go +++ b/pkg/cloud/scope/fargate.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) diff --git a/pkg/cloud/scope/machine.go b/pkg/cloud/scope/machine.go index 243bd40242..756ab2d188 100644 --- a/pkg/cloud/scope/machine.go +++ b/pkg/cloud/scope/machine.go @@ -31,7 +31,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/conditions" diff --git a/pkg/cloud/scope/machinepool.go b/pkg/cloud/scope/machinepool.go index f9e7fd1225..1afe0172d3 100644 --- a/pkg/cloud/scope/machinepool.go +++ b/pkg/cloud/scope/machinepool.go @@ -34,9 +34,8 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/remote" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" @@ -50,7 +49,7 @@ type MachinePoolScope struct { capiMachinePoolPatchHelper *patch.Helper Cluster *clusterv1.Cluster - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool InfraCluster EC2Scope AWSMachinePool *expinfrav1.AWSMachinePool } @@ -61,7 +60,7 @@ type MachinePoolScopeParams struct { Logger *logger.Logger Cluster *clusterv1.Cluster - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool InfraCluster EC2Scope AWSMachinePool *expinfrav1.AWSMachinePool } @@ -380,7 +379,7 @@ func (m *MachinePoolScope) GetLaunchTemplate() *expinfrav1.AWSLaunchTemplate { } // 
GetMachinePool returns the machine pool object. -func (m *MachinePoolScope) GetMachinePool() *expclusterv1.MachinePool { +func (m *MachinePoolScope) GetMachinePool() *clusterv1.MachinePool { return m.MachinePool } diff --git a/pkg/cloud/scope/managedcontrolplane.go b/pkg/cloud/scope/managedcontrolplane.go index be0bc76864..3e0ddad6a8 100644 --- a/pkg/cloud/scope/managedcontrolplane.go +++ b/pkg/cloud/scope/managedcontrolplane.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/controllers/remote" "sigs.k8s.io/cluster-api/util/patch" ) diff --git a/pkg/cloud/scope/managednodegroup.go b/pkg/cloud/scope/managednodegroup.go index 7ef4663a24..c8851c500d 100644 --- a/pkg/cloud/scope/managednodegroup.go +++ b/pkg/cloud/scope/managednodegroup.go @@ -37,8 +37,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) @@ -50,7 +49,7 @@ type ManagedMachinePoolScopeParams struct { Cluster *clusterv1.Cluster ControlPlane *ekscontrolplanev1.AWSManagedControlPlane ManagedMachinePool *expinfrav1.AWSManagedMachinePool - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool ControllerName string Session awsv2.Config MaxWaitActiveUpdateDelete time.Duration @@ -131,7 +130,7 @@ type ManagedMachinePoolScope struct { Cluster *clusterv1.Cluster ControlPlane *ekscontrolplanev1.AWSManagedControlPlane ManagedMachinePool *expinfrav1.AWSManagedMachinePool - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool EC2Scope EC2Scope MaxWaitActiveUpdateDelete time.Duration @@ -410,7 +409,7 @@ func (s *ManagedMachinePoolScope) GetLaunchTemplate() *expinfrav1.AWSLaunchTempl } // GetMachinePool returns the machine pool. 
-func (s *ManagedMachinePoolScope) GetMachinePool() *expclusterv1.MachinePool { +func (s *ManagedMachinePoolScope) GetMachinePool() *clusterv1.MachinePool { return s.MachinePool } diff --git a/pkg/cloud/scope/rosacontrolplane.go b/pkg/cloud/scope/rosacontrolplane.go index 47073c2dad..c292aa59c2 100644 --- a/pkg/cloud/scope/rosacontrolplane.go +++ b/pkg/cloud/scope/rosacontrolplane.go @@ -35,7 +35,7 @@ import ( stsservice "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) diff --git a/pkg/cloud/scope/rosamachinepool.go b/pkg/cloud/scope/rosamachinepool.go index 5c53635b5b..130e091135 100644 --- a/pkg/cloud/scope/rosamachinepool.go +++ b/pkg/cloud/scope/rosamachinepool.go @@ -30,8 +30,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" ) @@ -43,7 +42,7 @@ type RosaMachinePoolScopeParams struct { Cluster *clusterv1.Cluster ControlPlane *rosacontrolplanev1.ROSAControlPlane RosaMachinePool *expinfrav1.ROSAMachinePool - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool ControllerName string } @@ -109,7 +108,7 @@ type RosaMachinePoolScope struct { Cluster *clusterv1.Cluster ControlPlane *rosacontrolplanev1.ROSAControlPlane RosaMachinePool *expinfrav1.ROSAMachinePool - MachinePool *expclusterv1.MachinePool + MachinePool *clusterv1.MachinePool session awsv2.Config serviceLimiters throttle.ServiceLimiters diff --git a/pkg/cloud/scope/session.go b/pkg/cloud/scope/session.go index 3a18e65faf..5f1adab09f 100644 --- a/pkg/cloud/scope/session.go +++ b/pkg/cloud/scope/session.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/system" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" diff --git a/pkg/cloud/services/autoscaling/lifecyclehook.go b/pkg/cloud/services/autoscaling/lifecyclehook.go index 293070fab1..61d194e7b5 100644 --- a/pkg/cloud/services/autoscaling/lifecyclehook.go +++ b/pkg/cloud/services/autoscaling/lifecyclehook.go @@ -30,7 +30,7 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/pkg/cloud/services/ec2/bastion.go b/pkg/cloud/services/ec2/bastion.go index 8d31916530..89a8e241da 100644 --- a/pkg/cloud/services/ec2/bastion.go +++ b/pkg/cloud/services/ec2/bastion.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" 
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go index 6e5813c74a..5cfa6c55a7 100644 --- a/pkg/cloud/services/ec2/instances.go +++ b/pkg/cloud/services/ec2/instances.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // GetRunningInstanceByTags returns the existing instance or nothing if it doesn't exist. diff --git a/pkg/cloud/services/ec2/launchtemplate.go b/pkg/cloud/services/ec2/launchtemplate.go index ffe3936074..f08c9e2eea 100644 --- a/pkg/cloud/services/ec2/launchtemplate.go +++ b/pkg/cloud/services/ec2/launchtemplate.go @@ -48,7 +48,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/pkg/cloud/services/eks/cluster.go b/pkg/cloud/services/eks/cluster.go index 87f68755e6..20cf81b541 100644 --- a/pkg/cloud/services/eks/cluster.go +++ b/pkg/cloud/services/eks/cluster.go @@ -42,7 +42,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cmp" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/tristate" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/pkg/cloud/services/eks/eks.go b/pkg/cloud/services/eks/eks.go index 05b760fa44..b0a4811d8d 100644 --- a/pkg/cloud/services/eks/eks.go +++ b/pkg/cloud/services/eks/eks.go @@ -27,7 +27,7 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/pkg/cloud/services/eks/fargate.go b/pkg/cloud/services/eks/fargate.go index 06cd5ffac2..2054e8eec5 100644 --- a/pkg/cloud/services/eks/fargate.go +++ b/pkg/cloud/services/eks/fargate.go @@ -32,7 +32,7 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index 874ea2d815..6a81816094 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -46,7 +46,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/hash" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 
"sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/pkg/cloud/services/elb/loadbalancer_test.go b/pkg/cloud/services/elb/loadbalancer_test.go index d59c15c91b..66cf054ff5 100644 --- a/pkg/cloud/services/elb/loadbalancer_test.go +++ b/pkg/cloud/services/elb/loadbalancer_test.go @@ -46,7 +46,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) @@ -512,7 +512,7 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) { Name: aws.String(elbName), }, NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -577,7 +577,7 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: clusterName}, Spec: infrav1.AWSClusterSpec{ NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -660,7 +660,7 @@ func TestRegisterInstanceWithAPIServerELB(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: clusterName}, Spec: infrav1.AWSClusterSpec{ NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -816,7 +816,7 @@ func TestRegisterInstanceWithAPIServerNLB(t *testing.T) { LoadBalancerType: infrav1.LoadBalancerTypeNLB, }, NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -919,7 +919,7 @@ func TestRegisterInstanceWithAPIServerNLB(t *testing.T) { }, }, NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, @@ -1048,7 +1048,7 @@ func TestRegisterInstanceWithAPIServerNLB(t *testing.T) { ObjectMeta: metav1.ObjectMeta{Name: clusterName}, Spec: infrav1.AWSClusterSpec{ NetworkSpec: infrav1.NetworkSpec{ - Subnets: infrav1.Subnets{{ + Subnets: infrav1.Subnets{infrav1.SubnetSpec{ ID: clusterSubnetID, AvailabilityZone: az, }}, diff --git a/pkg/cloud/services/network/natgateways.go b/pkg/cloud/services/network/natgateways.go index 6bde5f5a64..8f8cad527c 100644 --- a/pkg/cloud/services/network/natgateways.go +++ b/pkg/cloud/services/network/natgateways.go @@ -36,7 +36,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/pkg/cloud/services/network/network.go b/pkg/cloud/services/network/network.go index 35aa421be7..1550a1e78f 100644 --- a/pkg/cloud/services/network/network.go +++ b/pkg/cloud/services/network/network.go @@ -22,7 +22,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" 
"sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/pkg/cloud/services/network/vpc.go b/pkg/cloud/services/network/vpc.go index 078afd1dd7..853269eda7 100644 --- a/pkg/cloud/services/network/vpc.go +++ b/pkg/cloud/services/network/vpc.go @@ -36,8 +36,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( diff --git a/pkg/cloud/services/securitygroup/securitygroups.go b/pkg/cloud/services/securitygroup/securitygroups.go index 9de501f7c5..bd5ec437dd 100644 --- a/pkg/cloud/services/securitygroup/securitygroups.go +++ b/pkg/cloud/services/securitygroup/securitygroups.go @@ -38,8 +38,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( From 9bc1f1f65fc6a3d892640ff09309eacf4b35c37d Mon Sep 17 00:00:00 2001 From: Borja Clemente Date: Thu, 16 Oct 2025 17:29:27 +0200 Subject: [PATCH 5/9] Fix go dependencies Signed-off-by: Borja Clemente --- .../awsmachine_controller_unit_test.go | 2 +- controllers/rosacluster_controller.go | 4 +- controllers/rosacluster_controller_test.go | 4 +- controllers/suite_test.go | 4 +- .../api/v1beta2/rosacontrolplane_types.go | 2 +- .../rosa/api/v1beta2/zz_generated.deepcopy.go | 2 +- exp/api/v1beta2/rosamachinepool_types.go | 2 +- exp/api/v1beta2/rosanetwork_types.go | 2 +- exp/api/v1beta2/rosaroleconfig_types.go | 2 +- exp/api/v1beta2/zz_generated.deepcopy.go | 2 +- exp/controllers/rosanetwork_controller.go | 2 +- .../rosanetwork_controller_test.go | 2 +- exp/controllers/rosaroleconfig_controller.go | 2 +- go.mod | 93 ++--- go.sum | 332 ++++++------------ pkg/cloud/scope/rosanetwork.go | 2 +- test/e2e/suites/unmanaged/helpers_test.go | 4 +- 17 files changed, 170 insertions(+), 293 deletions(-) diff --git a/controllers/awsmachine_controller_unit_test.go b/controllers/awsmachine_controller_unit_test.go index 1859ad7baf..de1d3a8676 100644 --- a/controllers/awsmachine_controller_unit_test.go +++ b/controllers/awsmachine_controller_unit_test.go @@ -53,7 +53,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" + kubeadmv1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" "sigs.k8s.io/cluster-api/util" ) diff --git a/controllers/rosacluster_controller.go b/controllers/rosacluster_controller.go index 8d228ba0f1..5a5da2214d 100644 --- a/controllers/rosacluster_controller.go +++ b/controllers/rosacluster_controller.go @@ -48,8 +48,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + expclusterv1 
"sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" diff --git a/controllers/rosacluster_controller_test.go b/controllers/rosacluster_controller_test.go index 170e5faefa..e7e5cfbe58 100644 --- a/controllers/rosacluster_controller_test.go +++ b/controllers/rosacluster_controller_test.go @@ -43,8 +43,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/patch" ) diff --git a/controllers/suite_test.go b/controllers/suite_test.go index 8cd1a16b5d..b48a95dfbf 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -32,8 +32,8 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - kubeadmv1beta1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + kubeadmv1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + expclusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) var ( diff --git a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go index 94de67ef90..3ae4eff314 100644 --- a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go +++ b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go @@ -21,7 +21,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // RosaEndpointAccessType specifies the publishing scope of cluster endpoints. diff --git a/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go b/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go index f0cde7f69c..8c6e718edb 100644 --- a/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go +++ b/controlplane/rosa/api/v1beta2/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/exp/api/v1beta2/rosamachinepool_types.go b/exp/api/v1beta2/rosamachinepool_types.go index f23bd807d3..b57ff7ea3b 100644 --- a/exp/api/v1beta2/rosamachinepool_types.go +++ b/exp/api/v1beta2/rosamachinepool_types.go @@ -23,7 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // RosaMachinePoolSpec defines the desired state of RosaMachinePool. 
diff --git a/exp/api/v1beta2/rosanetwork_types.go b/exp/api/v1beta2/rosanetwork_types.go index b6a6d3634f..e1228bf2a5 100644 --- a/exp/api/v1beta2/rosanetwork_types.go +++ b/exp/api/v1beta2/rosanetwork_types.go @@ -20,7 +20,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // ROSANetworkFinalizer allows the controller to clean up resources on delete. diff --git a/exp/api/v1beta2/rosaroleconfig_types.go b/exp/api/v1beta2/rosaroleconfig_types.go index e3bdda7db9..05056e5887 100644 --- a/exp/api/v1beta2/rosaroleconfig_types.go +++ b/exp/api/v1beta2/rosaroleconfig_types.go @@ -22,7 +22,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // OidcProviderType set to Managed or UnManaged diff --git a/exp/api/v1beta2/zz_generated.deepcopy.go b/exp/api/v1beta2/zz_generated.deepcopy.go index af59aa3b75..d2fe08ef4d 100644 --- a/exp/api/v1beta2/zz_generated.deepcopy.go +++ b/exp/api/v1beta2/zz_generated.deepcopy.go @@ -27,7 +27,7 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" rosaapiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" - "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. diff --git a/exp/controllers/rosanetwork_controller.go b/exp/controllers/rosanetwork_controller.go index bcc1183cc2..2859eaecb3 100644 --- a/exp/controllers/rosanetwork_controller.go +++ b/exp/controllers/rosanetwork_controller.go @@ -43,7 +43,7 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) diff --git a/exp/controllers/rosanetwork_controller_test.go b/exp/controllers/rosanetwork_controller_test.go index 74c775d5e4..284fc00b94 100644 --- a/exp/controllers/rosanetwork_controller_test.go +++ b/exp/controllers/rosanetwork_controller_test.go @@ -39,7 +39,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/exp/controllers/rosaroleconfig_controller.go b/exp/controllers/rosaroleconfig_controller.go index 349348039b..b136306951 100644 --- a/exp/controllers/rosaroleconfig_controller.go +++ b/exp/controllers/rosaroleconfig_controller.go @@ -51,7 +51,7 @@ import ( stsiface "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" - clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/conditions" 
"sigs.k8s.io/cluster-api/util/predicates" ) diff --git a/go.mod b/go.mod index d34e8ef4ca..64a4ec9b9e 100644 --- a/go.mod +++ b/go.mod @@ -7,10 +7,10 @@ require ( github.com/apparentlymart/go-cidr v1.1.0 github.com/aws/amazon-vpc-cni-k8s v1.15.5 github.com/aws/aws-lambda-go v1.41.0 - github.com/aws/aws-sdk-go-v2 v1.38.0 - github.com/aws/aws-sdk-go-v2/config v1.31.0 - github.com/aws/aws-sdk-go-v2/credentials v1.18.4 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 + github.com/aws/aws-sdk-go-v2 v1.39.2 + github.com/aws/aws-sdk-go-v2/config v1.31.12 + github.com/aws/aws-sdk-go-v2/credentials v1.18.16 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12 github.com/aws/aws-sdk-go-v2/service/autoscaling v1.52.4 github.com/aws/aws-sdk-go-v2/service/cloudtrail v1.52.0 github.com/aws/aws-sdk-go-v2/service/configservice v1.56.0 @@ -22,13 +22,15 @@ require ( github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2 v1.45.2 github.com/aws/aws-sdk-go-v2/service/iam v1.32.0 github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 - github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0 + github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4 github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6 github.com/aws/aws-sdk-go-v2/service/ssm v1.59.1 - github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 - github.com/aws/smithy-go v1.22.5 + github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 + github.com/aws/smithy-go v1.23.0 github.com/awslabs/goformation/v4 v4.19.5 github.com/blang/semver v3.5.1+incompatible + github.com/coreos/ignition v0.35.0 + github.com/coreos/ignition/v2 v2.24.0 github.com/go-logr/logr v1.4.3 github.com/gofrs/flock v0.8.1 github.com/golang/mock v1.6.0 @@ -37,6 +39,7 @@ require ( github.com/google/gofuzz v1.2.0 github.com/onsi/ginkgo/v2 v2.23.4 github.com/onsi/gomega v1.38.0 + github.com/openshift-online/ocm-api-model/clientapi v0.0.431 github.com/openshift-online/ocm-common v0.0.31 github.com/openshift-online/ocm-sdk-go v0.1.476 github.com/openshift/rosa v1.2.55 @@ -45,12 +48,12 @@ require ( github.com/sergi/go-diff v1.3.1 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 - github.com/spf13/pflag v1.0.7 + github.com/spf13/pflag v1.0.10 github.com/zgalor/weberr v0.8.2 go.uber.org/mock v0.5.2 - golang.org/x/crypto v0.41.0 - golang.org/x/net v0.43.0 - golang.org/x/text v0.28.0 + golang.org/x/crypto v0.43.0 + golang.org/x/net v0.46.0 + golang.org/x/text v0.30.0 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.33.4 k8s.io/apiextensions-apiserver v0.33.4 @@ -70,8 +73,11 @@ require ( require ( github.com/AlecAivazis/survey/v2 v2.2.15 // indirect - github.com/aws/aws-sdk-go v1.55.7 // indirect - github.com/go-jose/go-jose/v4 v4.0.5 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect + github.com/coreos/go-systemd/v22 v22.6.0 // indirect + github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 // indirect + github.com/go-jose/go-jose/v4 v4.1.1 // indirect github.com/itchyny/gojq v0.12.9 // indirect github.com/itchyny/timefmt-go v0.1.4 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect @@ -87,11 +93,12 @@ require ( github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/openshift-online/ocm-api-model/model v0.0.431 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect + github.com/vincent-petithory/dataurl v1.0.0 // indirect ) require ( al.essio.dev/pkg/shellescape v1.5.1 // indirect - cel.dev/expr v0.19.1 // indirect + cel.dev/expr v0.24.0 // 
indirect github.com/99designs/go-keychain v0.0.0-20191008050251-8e49817e8af4 // indirect github.com/99designs/keyring v1.2.2 // indirect github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect @@ -102,23 +109,23 @@ require ( github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/adrg/xdg v0.5.3 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 // indirect github.com/aws/aws-sdk-go-v2/service/cloudformation v1.50.0 github.com/aws/aws-sdk-go-v2/service/eventbridge v1.39.3 - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 // indirect github.com/aws/aws-sdk-go-v2/service/organizations v1.27.3 // indirect github.com/aws/aws-sdk-go-v2/service/servicequotas v1.21.4 github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 - github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 // indirect github.com/aymerick/douceur v0.2.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect @@ -174,14 +181,12 @@ require ( github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/kylelemons/godebug v1.1.0 // indirect github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect github.com/microcosm-cc/bluemonday v1.0.27 // indirect - github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/spdystream v0.5.0 // indirect @@ -217,37 +222,35 @@ require ( github.com/zalando/go-keyring v0.2.3 // indirect gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // 
indirect - go.opentelemetry.io/otel v1.34.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 // indirect - go.opentelemetry.io/otel/metric v1.34.0 // indirect - go.opentelemetry.io/otel/sdk v1.34.0 // indirect - go.opentelemetry.io/otel/trace v1.34.0 // indirect + go.opentelemetry.io/otel/metric v1.37.0 // indirect + go.opentelemetry.io/otel/sdk v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.opentelemetry.io/proto/otlp v1.4.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect - go.uber.org/mock v0.5.2 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/net v0.42.0 // indirect - golang.org/x/oauth2 v0.30.0 // indirect - golang.org/x/sync v0.16.0 // indirect - golang.org/x/sys v0.35.0 // indirect - golang.org/x/term v0.34.0 // indirect - golang.org/x/time v0.9.0 // indirect - golang.org/x/tools v0.35.0 // indirect + golang.org/x/oauth2 v0.32.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/term v0.36.0 // indirect + golang.org/x/time v0.13.0 // indirect + golang.org/x/tools v0.37.0 // indirect gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f // indirect - google.golang.org/grpc v1.71.3 // indirect - google.golang.org/protobuf v1.36.6 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 // indirect + google.golang.org/grpc v1.75.1 // indirect + google.golang.org/protobuf v1.36.10 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiserver v0.33.4 // indirect + k8s.io/apiserver v0.33.4 k8s.io/cluster-bootstrap v0.33.3 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect diff --git a/go.sum b/go.sum index 997a9b1618..c598a26d03 100644 --- a/go.sum +++ b/go.sum @@ -1,7 +1,7 @@ al.essio.dev/pkg/shellescape v1.5.1 h1:86HrALUujYS/h+GtqoB26SBEdkWfmMI6FubjXlsXyho= al.essio.dev/pkg/shellescape v1.5.1/go.mod h1:6sIqp7X2P6mThCQ7twERpZTuigpr6KbZWtls1U8I890= -cel.dev/expr v0.19.1 h1:NciYrtDRIR0lNCnH1LFJegdjspNx9fI59O7TWcua/W4= -cel.dev/expr v0.19.1/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= +cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= @@ -20,11 +20,8 @@ github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= github.com/Masterminds/goutils v1.1.1 
h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -<<<<<<< HEAD -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -======= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= ->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0) +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs= @@ -51,26 +48,26 @@ github.com/aws/amazon-vpc-cni-k8s v1.15.5 h1:/mqTXB4HoGYg4CiU4Gco9iEvZ+V/309Na4H github.com/aws/amazon-vpc-cni-k8s v1.15.5/go.mod h1:jV4wNtmgT2Ra1/oZU99DPOFsCUKnf0mYfIyzDyAUVAY= github.com/aws/aws-lambda-go v1.41.0 h1:l/5fyVb6Ud9uYd411xdHZzSf2n86TakxzpvIoz7l+3Y= github.com/aws/aws-lambda-go v1.41.0/go.mod h1:jwFe2KmMsHmffA1X2R09hH6lFzJQxzI8qK17ewzbQMM= -github.com/aws/aws-sdk-go-v2 v1.38.0 h1:UCRQ5mlqcFk9HJDIqENSLR3wiG1VTWlyUfLDEvY7RxU= -github.com/aws/aws-sdk-go-v2 v1.38.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0 h1:6GMWV6CNpA/6fbFHnoAjrv4+LGfyTqZz2LtCHnspgDg= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.0/go.mod h1:/mXlTIVG9jbxkqDnr5UQNQxW1HRYxeGklkM9vAFeabg= -github.com/aws/aws-sdk-go-v2/config v1.31.0 h1:9yH0xiY5fUnVNLRWO0AtayqwU1ndriZdN78LlhruJR4= -github.com/aws/aws-sdk-go-v2/config v1.31.0/go.mod h1:VeV3K72nXnhbe4EuxxhzsDc/ByrCSlZwUnWH52Nde/I= -github.com/aws/aws-sdk-go-v2/credentials v1.18.4 h1:IPd0Algf1b+Qy9BcDp0sCUcIWdCQPSzDoMK3a8pcbUM= -github.com/aws/aws-sdk-go-v2/credentials v1.18.4/go.mod h1:nwg78FjH2qvsRM1EVZlX9WuGUJOL5od+0qvm0adEzHk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 h1:GicIdnekoJsjq9wqnvyi2elW6CGMSYKhdozE7/Svh78= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3/go.mod h1:R7BIi6WNC5mc1kfRM7XM/VHC3uRWkjc396sfabq4iOo= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4 h1:0SzCLoPRSK3qSydsaFQWugP+lOBCTPwfcBOm6222+UA= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.18.4/go.mod h1:JAet9FsBHjfdI+TnMBX4ModNNaQHAd3dc/Bk+cNsxeM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 h1:o9RnO+YZ4X+kt5Z7Nvcishlz0nksIt2PIzDglLMP0vA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3/go.mod h1:+6aLJzOG1fvMOyzIySYjOFjcguGvVRL68R+uoRencN4= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 h1:joyyUFhiTQQmVK6ImzNU9TQSNRNeD9kOklqTzyk5v6s= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3/go.mod h1:+vNIyZQP3b3B1tSLI0lxvrU9cfM7gpdRXMFfm67ZcPc= +github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I= +github.com/aws/aws-sdk-go-v2 v1.39.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00= +github.com/aws/aws-sdk-go-v2/config v1.31.12 h1:pYM1Qgy0dKZLHX2cXslNacbcEFMkDMl+Bcj5ROuS6p8= +github.com/aws/aws-sdk-go-v2/config v1.31.12/go.mod h1:/MM0dyD7KSDPR+39p9ZNVKaHDLb9qnfDurvVS2KAhN8= +github.com/aws/aws-sdk-go-v2/credentials v1.18.16 h1:4JHirI4zp958zC026Sm+V4pSDwW4pwLefKrc0bF2lwI= 
+github.com/aws/aws-sdk-go-v2/credentials v1.18.16/go.mod h1:qQMtGx9OSw7ty1yLclzLxXCRbrkjWAM7JnObZjmCB7I=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9 h1:Mv4Bc0mWmv6oDuSWTKnk+wgeqPL5DRFu5bQL9BGPQ8Y=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.9/go.mod h1:IKlKfRppK2a1y0gy1yH6zD+yX5uplJ6UuPlgd48dJiQ=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12 h1:ofHawDLJTI6ytDIji+g4dXQ6u2idzTb04tDlN9AS614=
+github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.19.12/go.mod h1:f5pL4iLDfbcxj1SZcdRdIokBB5eHbuYPS/Fs9DwUPRQ=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 h1:se2vOWGD3dWQUtfn4wEjRQJb1HK1XsNIt825gskZ970=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9/go.mod h1:hijCGH2VfbZQxqCDN7bwz/4dzxV+hkyhjawAtdPWKZA=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 h1:6RBnKZLkJM4hQ+kN6E7yWFveOTg8NLPHAkqrs4ZPlTU=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9/go.mod h1:V9rQKRmK7AWuEsOMnHzKj8WyrIir1yUJbZxDuZLFvXI=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3 h1:ZV2XK2L3HBq9sCKQiQ/MdhZJppH/rH0vddEAamsHUIs=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.3/go.mod h1:b9F9tk2HdHpbf3xbN7rUZcfmJI26N6NcJu/8OsBFI/0=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9 h1:w9LnHqTq8MEdlnyhV4Bwfizd65lfNCNgdlNC6mM5paE=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.9/go.mod h1:LGEP6EK4nj+bwWNdrvX/FnDTFowdBNwcSPuZu/ouFys=
github.com/aws/aws-sdk-go-v2/service/autoscaling v1.52.4 h1:vzLD0FyNU4uxf2QE5UDG0jSEitiJXbVEUwf2Sk3usF4=
github.com/aws/aws-sdk-go-v2/service/autoscaling v1.52.4/go.mod h1:CDqMoc3KRdZJ8qziW96J35lKH01Wq3B2aihtHj2JbRs=
github.com/aws/aws-sdk-go-v2/service/cloudformation v1.50.0 h1:Ap5tOJfeAH1hO2UQc3X3uMlwP7uryFeZXMvZCXIlLSE=
@@ -95,20 +92,20 @@ github.com/aws/aws-sdk-go-v2/service/eventbridge v1.39.3 h1:T6L7fsONflMeXuvsT8qZ
github.com/aws/aws-sdk-go-v2/service/eventbridge v1.39.3/go.mod h1:sIrUII6Z+hAVAgcpmsc2e9HvEr++m/v8aBPT7s4ZYUk=
github.com/aws/aws-sdk-go-v2/service/iam v1.32.0 h1:ZNlfPdw849gBo/lvLFbEEvpTJMij0LXqiNWZ+lIamlU=
github.com/aws/aws-sdk-go-v2/service/iam v1.32.0/go.mod h1:aXWImQV0uTW35LM0A/T4wEg6R1/ReXUu4SM6/lUHYK0=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3 h1:3ZKmesYBaFX33czDl6mbrcHb6jeheg6LqjJhQdefhsY=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.3/go.mod h1:7ryVb78GLCnjq7cw45N6oUb9REl7/vNUwjvIqC5UgdY=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 h1:ieRzyHXypu5ByllM7Sp4hC5f/1Fy5wqxqY0yB85hC7s=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3/go.mod h1:O5ROz8jHiOAKAwx179v+7sHMhfobFVi6nZt8DEyiYoM=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3 h1:SE/e52dq9a05RuxzLcjT+S5ZpQobj3ie3UTaSf2NnZc=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.3/go.mod h1:zkpvBTsR020VVr8TOrwK2TrUW9pOir28sH5ECHpnAfo=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0 h1:X0FveUndcZ3lKbSpIC6rMYGRiQTcUVRNH6X4yYtIrlU=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.0/go.mod h1:IWjQYlqw4EX9jw2g3qnEPPWvCE6bS8fKzhMed1OK7c8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9 h1:5r34CgVOD4WZudeEKZ9/iKpiT6cM1JyEROpXjOcdWv8=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.9/go.mod h1:dB12CEbNWPbzO2uC6QSWHteqOg4JfBVJOojbAoAUb5I=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9 h1:wuZ5uW2uhJR63zwNlqWH2W4aL4ZjeJP3o92/W+odDY4=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.9/go.mod h1:/G58M2fGszCrOzvJUkDdY8O9kycodunH4VdT5oBAqls=
github.com/aws/aws-sdk-go-v2/service/organizations v1.27.3 h1:CnPWlONzFX9/yO6IGuKg9sWUE8WhKztYRFbhmOHXjJI=
github.com/aws/aws-sdk-go-v2/service/organizations v1.27.3/go.mod h1:hUHSXe9HFEmLfHrXndAX5e69rv0nBsg22VuNQYl0JLM=
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6 h1:PwbxovpcJvb25k019bkibvJfCpCmIANOFrXZIFPmRzk=
github.com/aws/aws-sdk-go-v2/service/resourcegroupstaggingapi v1.26.6/go.mod h1:Z4xLt5mXspLKjBV92i165wAJ/3T6TIv4n7RtIS8pWV0=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0 h1:egoDf+Geuuntmw79Mz6mk9gGmELCPzg5PFEABOHB+6Y=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.87.0/go.mod h1:t9MDi29H+HDbkolTSQtbI0HP9DemAWQzUjmWC7LGMnE=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4 h1:mUI3b885qJgfqKDUSj6RgbRqLdX0wGmg8ruM03zNfQA=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.88.4/go.mod h1:6v8ukAxc7z4x4oBjGUsLnH7KGLY9Uhcgij19UJNkiMg=
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6 h1:TIOEjw0i2yyhmhRry3Oeu9YtiiHWISZ6j/irS1W3gX4=
github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.28.6/go.mod h1:3Ba++UwWd154xtP4FRX5pUK3Gt4up5sDHCve6kVfE+g=
github.com/aws/aws-sdk-go-v2/service/servicequotas v1.21.4 h1:SSDkZRAO8Ok5SoQ4BJ0onDeb0ga8JBOCkUmNEpRChcw=
@@ -117,14 +114,14 @@ github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8 h1:80dpSqWMwx2dAm30Ib7J6ucz1ZHf
github.com/aws/aws-sdk-go-v2/service/sqs v1.38.8/go.mod h1:IzNt/udsXlETCdvBOL0nmyMe2t9cGmXmZgsdoZGYYhI=
github.com/aws/aws-sdk-go-v2/service/ssm v1.59.1 h1:Z4cmgV3hKuUIkhJsdn47hf/ABYHUtILfMrV+L8+kRwE=
github.com/aws/aws-sdk-go-v2/service/ssm v1.59.1/go.mod h1:PUWUl5MDiYNQkUHN9Pyd9kgtA/YhbxnSnHP+yQqzrM8=
-github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 h1:Mc/MKBf2m4VynyJkABoVEN+QzkfLqGj0aiJuEe7cMeM=
-github.com/aws/aws-sdk-go-v2/service/sso v1.28.0/go.mod h1:iS5OmxEcN4QIPXARGhavH7S8kETNL11kym6jhoS7IUQ=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 h1:6csaS/aJmqZQbKhi1EyEMM7yBW653Wy/B9hnBofW+sw=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0/go.mod h1:59qHWaY5B+Rs7HGTuVGaC32m0rdpQ68N8QCN3khYiqs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 h1:MG9VFW43M4A8BYeAfaJJZWrroinxeTi2r3+SnmLQfSA=
-github.com/aws/aws-sdk-go-v2/service/sts v1.37.0/go.mod h1:JdeBDPgpJfuS6rU/hNglmOigKhyEZtBmbraLE4GK1J8=
-github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw=
-github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.6 h1:A1oRkiSQOWstGh61y4Wc/yQ04sqrQZr1Si/oAXj20/s=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.6/go.mod h1:5PfYspyCU5Vw1wNPsxi15LZovOnULudOQuVxphSflQA=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1 h1:5fm5RTONng73/QA73LhCNR7UT9RpFH3hR6HWL6bIgVY=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.1/go.mod h1:xBEjWD13h+6nq+z4AkqSfSvqRKFgDIQeaMguAJndOWo=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.6 h1:p3jIvqYwUZgu/XYeI48bJxOhvm47hZb5HUQ0tn6Q9kA=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.6/go.mod h1:WtKK+ppze5yKPkZ0XwqIVWD4beCwv056ZbPQNoeHqM8=
+github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
+github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/awslabs/goformation/v4 v4.19.5 h1:Y+Tzh01tWg8gf//AgGKUamaja7Wx9NPiJf1FpZu4/iU=
github.com/awslabs/goformation/v4 v4.19.5/go.mod h1:JoNpnVCBOUtEz9bFxc9sjy8uBUCLF5c4D1L7RhRTVM8=
github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk=
@@ -148,22 +145,18 @@ github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtM
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-<<<<<<< HEAD
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
-=======
github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=
github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=
github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=
github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
-<<<<<<< HEAD
-github.com/coredns/corefile-migration v1.0.26 h1:xiiEkVB1Dwolb24pkeDUDBfygV9/XsOSq79yFCrhptY=
-github.com/coredns/corefile-migration v1.0.26/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
+github.com/coredns/corefile-migration v1.0.27 h1:WIIw5sU0LfGgoGnhdrYdVcto/aWmJoGA/C62iwkU0JM=
+github.com/coredns/corefile-migration v1.0.27/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb h1:rmqyI19j3Z/74bIRhuC59RB442rXUazKNueVpfJPxg4=
github.com/coreos/go-json v0.0.0-20230131223807-18775e0fb4fb/go.mod h1:rcFZM3uxVvdyNmsAV2jopgPD1cs5SPWJWU5dOz2LUnw=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
@@ -172,18 +165,14 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
-github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
-github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
+github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
+github.com/coreos/go-systemd/v22 v22.6.0/go.mod h1:iG+pp635Fo7ZmV/j14KUcmEyWF+0X7Lua8rrTWzYgWU=
github.com/coreos/ignition v0.35.0 h1:UFodoYq1mOPrbEjtxIsZbThcDyQwAI1owczRDqWmKkQ=
github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA=
-github.com/coreos/ignition/v2 v2.16.2 h1:wPpxTovdzCLJISYmNiM5Cpw4qCPc3/P2ibruPyS46eA=
-github.com/coreos/ignition/v2 v2.16.2/go.mod h1:Y1BKC60VSNgA5oWNoLIHXigpFX1FFn4CVeimmsI+Bhg=
+github.com/coreos/ignition/v2 v2.24.0 h1:TVcsSWiYvhXihD8Mss3CTRuKaNZM2OIfpoKiudIhrKo=
+github.com/coreos/ignition/v2 v2.24.0/go.mod h1:HelGgFZ1WZ4ZPOIDS0a06A2JTdbbdAine5r3AkSYz5s=
github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687 h1:uSmlDgJGbUB0bwQBcZomBTottKwEDF5fF8UjSwKSzWM=
github.com/coreos/vcontext v0.0.0-20230201181013-d72178a18687/go.mod h1:Salmysdw7DAVuobBW/LwsKKgpyCPHUhjyJoMJD+ZJiI=
-=======
-github.com/coredns/corefile-migration v1.0.27 h1:WIIw5sU0LfGgoGnhdrYdVcto/aWmJoGA/C62iwkU0JM=
-github.com/coredns/corefile-migration v1.0.27/go.mod h1:56DPqONc3njpVPsdilEnfijCwNGC3/kTJLl7i7SPavY=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
github.com/cpuguy83/go-md2man/v2 v2.0.6 h1:XJtiaUW6dEEqVuZiMTn1ldk455QWwEIsMIJlo5vtkx0=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
@@ -230,8 +219,8 @@ github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
-github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE=
-github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA=
+github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI=
+github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
@@ -314,16 +303,9 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f h1:5CjVwnuUcp5adK4gmY6i72gpVFVnZDP2h5TmPScB6u4=
github.com/google/goterm v0.0.0-20190703233501-fc88cf888a3f/go.mod h1:nOFQdrUlIlx6M6ODdSpBj1NVA+VgLC6kmw60mkw34H4=
-<<<<<<< HEAD
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg=
-github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI=
-github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg=
-=======
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=
github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
@@ -413,11 +395,8 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
-<<<<<<< HEAD
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-=======
->>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4)
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@@ -431,15 +410,12 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
-<<<<<<< HEAD
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-=======
->>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4)
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
@@ -523,8 +499,8 @@ github.com/openshift-online/ocm-common v0.0.31 h1:csxB4UQAUhwhDOVBmOzUKgtemuwV9r
github.com/openshift-online/ocm-common v0.0.31/go.mod h1:VEkuZp9aqbXtetZ5ycND6QpvhykvTuBF3oPsVM1X3vI=
github.com/openshift-online/ocm-sdk-go v0.1.476 h1:l5gp/QEqnocqM02m7pDeS9ndXcCTBamewVSGaymd88Y=
github.com/openshift-online/ocm-sdk-go v0.1.476/go.mod h1:ds+aOAlQbiK0ubZP3CwXkzd7m48v6fMQ1ef9UCrjzBY=
-github.com/openshift/rosa v1.99.9-testing.0.20250926125556-7903b7e2b476 h1:Lk//9GAVe/QOX7EkBZ79HZcoLiGdK5doQO7ECUc6tf0=
-github.com/openshift/rosa v1.99.9-testing.0.20250926125556-7903b7e2b476/go.mod h1:kb6iV145TXhUWBONqlflNIYNbrcYGLk/SFZD6vNx4wM=
+github.com/openshift/rosa v1.2.55 h1:Y6UD1474aExF4bZSh2KH4zE+Xl2NVsiuj3TLQGT9U+Y=
+github.com/openshift/rosa v1.2.55/go.mod h1:EE0yTEjbwxfnH/9YbQZaUXUVbIzfPa9KCRNw19QdLsw=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
@@ -532,12 +508,11 @@ github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xl
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-<<<<<<< HEAD
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-<<<<<<< HEAD
+github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
+github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc=
github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -547,37 +522,17 @@ github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2
github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
-=======
-=======
-github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
-github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
-github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
-github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
-github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
-github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
-github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
-github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
-github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
-github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
->>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4)
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.2 h1:YwD0ulJSJytLpiaWua0sBDusfsCZohxjxzVTYjwxfV8=
github.com/rivo/uniseg v0.4.2/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
-<<<<<<< HEAD
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
-github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
+github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
+github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
-=======
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
->>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4)
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
@@ -609,8 +564,8 @@ github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cA
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M=
-github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/stoewer/go-strcase v1.3.0 h1:g0eASXYtp+yvN9fK8sH94oCIk0fau9uV1/ZdJ0AVEzs=
@@ -620,12 +575,9 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
-<<<<<<< HEAD
-github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-=======
github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
->>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4)
+github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -635,10 +587,12 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
+github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI=
+github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
@@ -657,54 +611,32 @@ gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a h1:DxppxFKRqJ8
gitlab.com/c0b/go-ordered-json v0.0.0-20201030195603-febf46534d5a/go.mod h1:NREvu3a57BaK0R1+ztrEzHWiZAihohNLQ6trPxlIqZI=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
-go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
-go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
+go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
+go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0 h1:FyjCyI9jVEfqhUh2MoSkmolPjfh5fp2hnV0b0irxH4Q=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.22.0/go.mod h1:hYwym2nDEeZfG/motx0p7L7J1N1vyzIThemQsb4g2qY=
-<<<<<<< HEAD
-<<<<<<< HEAD
-go.opentelemetry.io/otel/metric v1.29.0 h1:vPf/HFWTNkPu1aYeIsc98l4ktOQaL6LeSoeV2g+8YLc=
-go.opentelemetry.io/otel/metric v1.29.0/go.mod h1:auu/QWieFVWx+DmQOUMgj0F8LHWdgalxXqvp7BII/W8=
-go.opentelemetry.io/otel/sdk v1.29.0 h1:vkqKjk7gwhS8VaWb0POZKmIEDimRCMsopNYnriHyryo=
-go.opentelemetry.io/otel/sdk v1.29.0/go.mod h1:pM8Dx5WKnvxLCb+8lG1PRNIDxu9g9b9g59Qr7hfAAok=
-go.opentelemetry.io/otel/trace v1.29.0 h1:J/8ZNK4XgR7a21DZUAsbF8pZ5Jcw1VhACmnYt39JTi4=
-go.opentelemetry.io/otel/trace v1.29.0/go.mod h1:eHl3w0sp3paPkYstJOmAimxhiFXPg+MMTlEh3nsQgWQ=
-go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
-go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
+go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
+go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
+go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
+go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
+go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
+go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
+go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
+go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
+go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-=======
-go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
-go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
-go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
-go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
-go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
-go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
-go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
-go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
->>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4)
-=======
-go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
-go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
-go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
-go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
-go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
-go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
-go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
-go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
-go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
-go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
go.uber.org/mock v0.5.2 h1:LbtPTcP8A5k9WPXj54PPPbjcI4Y6lhyOZXn+VS7wNko=
@@ -736,13 +668,8 @@ golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-<<<<<<< HEAD
-golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
-golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
-=======
-golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
-golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
+golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
+golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
@@ -769,24 +696,19 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
-<<<<<<< HEAD
-golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
-golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
-=======
-golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
-golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
+golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
+golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
-golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
+golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
-golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
+golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -816,42 +738,22 @@ golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-<<<<<<< HEAD
-golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
-golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
+golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
+golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY=
-golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
-golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
-=======
-golang.org/x/sys v0.34.0 h1:H5Y5sJ2L2JRdyv7ROF1he/lPdvFsd0mJHFw2ThKHxLA=
-golang.org/x/sys v0.34.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
-golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
-golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
+golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
+golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-<<<<<<< HEAD
-<<<<<<< HEAD
-golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
-golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
-golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
-golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
-=======
-golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
-golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
-=======
-golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
-golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
-golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
-golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
->>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4)
+golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
+golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
+golang.org/x/time v0.13.0 h1:eUlYslOIt32DgYD6utsuUeHs4d7AsEYLuIAdg7FlYgI=
+golang.org/x/time v0.13.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@@ -868,35 +770,32 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-<<<<<<< HEAD
-golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
-golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
+golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
+golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-=======
-golang.org/x/tools v0.34.0 h1:qIpSLOxeCYGg9TrcJokLBG4KFA6d795g0xkBkiESGlo=
-golang.org/x/tools v0.34.0/go.mod h1:pAP9OwEaY1CAW3HOmg3hLZC5Z0CCmzjAF2UQMSqNARg=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0=
gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
+gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
+gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
-google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24=
-google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f h1:OxYkA3wjPsZyBylwymxSHa7ViiW1Sml4ToBrncvFehI=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250115164207-1a7da9e5054f/go.mod h1:+2Yz8+CLJbIfL9z73EW45avw8Lmge3xVElCP9zEKi50=
+google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c h1:AtEkQdl5b6zsybXcbz00j1LwNodDuH6hVifIaNqk7NQ=
+google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c/go.mod h1:ea2MjsO70ssTfCjiwHgI0ZFqcw45Ksuk2ckf9G468GA=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797 h1:CirRxTOwnRWVLKzDNrs0CXAaVozJoR4G9xvdRecrdpk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251002232023-7c0ddcbb5797/go.mod h1:HSkG/KdJWusxU1F6CNrwNDjBMgisKxGnc5dAZfT0mjQ=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
-google.golang.org/grpc v1.71.3 h1:iEhneYTxOruJyZAxdAv8Y0iRZvsc5M6KoW7UA0/7jn0=
-google.golang.org/grpc v1.71.3/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
+google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI=
+google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -905,13 +804,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-<<<<<<< HEAD
-google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc=
-google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU=
-=======
-google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
-google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
+google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
+google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -941,21 +835,7 @@ gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
-<<<<<<< HEAD
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
-k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls=
-k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k=
-k8s.io/apiextensions-apiserver v0.32.3 h1:4D8vy+9GWerlErCwVIbcQjsWunF9SUGNu7O7hiQTyPY=
-k8s.io/apiextensions-apiserver v0.32.3/go.mod h1:8YwcvVRMVzw0r1Stc7XfGAzB/SIVLunqApySV5V7Dss=
-k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U=
-k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE=
-k8s.io/apiserver v0.32.3 h1:kOw2KBuHOA+wetX1MkmrxgBr648ksz653j26ESuWNY8=
-k8s.io/apiserver v0.32.3/go.mod h1:q1x9B8E/WzShF49wh3ADOh6muSfpmFL0I2t+TG0Zdgc=
-k8s.io/cli-runtime v0.32.3 h1:khLF2ivU2T6Q77H97atx3REY9tXiA3OLOjWJxUrdvss=
-k8s.io/cli-runtime v0.32.3/go.mod h1:vZT6dZq7mZAca53rwUfdFSZjdtLyfF61mkf/8q+Xjak=
-k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU=
-k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY=
-=======
k8s.io/api v0.33.4 h1:oTzrFVNPXBjMu0IlpA2eDDIU49jsuEorGHB4cvKupkk=
k8s.io/api v0.33.4/go.mod h1:VHQZ4cuxQ9sCUMESJV5+Fe8bGnqAARZ08tSTdHWfeAc=
k8s.io/apiextensions-apiserver v0.33.4 h1:rtq5SeXiDbXmSwxsF0MLe2Mtv3SwprA6wp+5qh/CrOU=
@@ -968,14 +848,8 @@ k8s.io/cli-runtime v0.33.4 h1:V8NSxGfh24XzZVhXmIGzsApdBpGq0RQS2u/Fz1GvJwk=
k8s.io/cli-runtime v0.33.4/go.mod h1:V+ilyokfqjT5OI+XE+O515K7jihtr0/uncwoyVqXaIU=
k8s.io/client-go v0.33.4 h1:TNH+CSu8EmXfitntjUPwaKVPN0AYMbc9F1bBS8/ABpw=
k8s.io/client-go v0.33.4/go.mod h1:LsA0+hBG2DPwovjd931L/AoaezMPX9CmBgyVyBZmbCY=
-<<<<<<< HEAD
->>>>>>> 52df0c62f (deps: upgrade Kubernetes dependencies to v0.33.4)
-k8s.io/cluster-bootstrap v0.32.3 h1:AqIpsUhB6MUeaAsl1WvaUw54AHRd2hfZrESlKChtd8s=
-k8s.io/cluster-bootstrap v0.32.3/go.mod h1:CHbBwgOb6liDV6JFUTkx5t85T2xidy0sChBDoyYw344=
-=======
k8s.io/cluster-bootstrap v0.33.3 h1:u2NTxJ5CFSBFXaDxLQoOWMly8eni31psVso+caq6uwI=
k8s.io/cluster-bootstrap v0.33.3/go.mod h1:p970f8u8jf273zyQ5raD8WUu2XyAl0SAWOY82o7i/ds=
->>>>>>> 4fccd02c0 (deps: update cluster-api to v1.11.1 and controller-runtime to v0.21.0)
k8s.io/component-base v0.33.4 h1:Jvb/aw/tl3pfgnJ0E0qPuYLT0NwdYs1VXXYQmSuxJGY=
k8s.io/component-base v0.33.4/go.mod h1:567TeSdixWW2Xb1yYUQ7qk5Docp2kNznKL87eygY8Rc=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
diff --git a/pkg/cloud/scope/rosanetwork.go b/pkg/cloud/scope/rosanetwork.go
index 580b0c47e7..bf455393b4 100644
--- a/pkg/cloud/scope/rosanetwork.go
+++ b/pkg/cloud/scope/rosanetwork.go
@@ -29,7 +29,7 @@ import (
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	"sigs.k8s.io/cluster-api/util/patch"
)
diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go
index 2918a3d8fe..c143533870 100644
--- a/test/e2e/suites/unmanaged/helpers_test.go
+++ b/test/e2e/suites/unmanaged/helpers_test.go
@@ -52,8 +52,8 @@ import (
	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils"
	"sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared"
	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
-	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
+	bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1"
+	controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1"
	"sigs.k8s.io/cluster-api/test/framework"
	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
	"sigs.k8s.io/cluster-api/util/conditions"

From d03484cf24aa6e8780a073230a1452f3b0886624 Mon Sep 17 00:00:00 2001
From: Borja Clemente
Date: Thu, 23 Oct 2025 12:13:00 +0200
Subject: [PATCH 6/9] Update imports, code, and code generation to CAPI 1.11

- Update all imports to v1beta2 types, except for conditions, which stay in v1beta1.
- Adapt the source code to work with v1beta2 and the deprecated conditions.
- Manually update the conversions.

Signed-off-by: Borja Clemente
---
 Makefile | 12 +-
 api/v1beta1/awscluster_types.go | 19 +-
 api/v1beta1/awsclustertemplate_types.go | 3 +-
 api/v1beta1/awsmachine_types.go | 13 +-
 api/v1beta1/awsmachinetemplate_types.go | 3 +-
 api/v1beta1/conditions_consts.go | 54 ++++--
 api/v1beta1/tags.go | 3 +-
 api/v1beta1/types.go | 3 +-
 api/v1beta1/zz_generated.conversion.go | 46 +++--
 api/v1beta1/zz_generated.deepcopy.go | 13 +-
 api/v1beta2/awscluster_types.go | 20 ++-
 api/v1beta2/awscluster_webhook.go | 2 +-
 api/v1beta2/awscluster_webhook_test.go | 2 +-
 api/v1beta2/awsclustertemplate_types.go | 3 +-
 api/v1beta2/awsmachine_types.go | 12 +-
 api/v1beta2/awsmachinetemplate_types.go | 3 +-
 api/v1beta2/awsmanagedcluster_types.go | 14 +-
 api/v1beta2/conditions_consts.go | 3 +-
 api/v1beta2/tags.go | 3 +-
 api/v1beta2/types.go | 3 +-
 api/v1beta2/zz_generated.deepcopy.go | 9 +-
 bootstrap/eks/api/v1beta1/condition_consts.go | 3 +-
 bootstrap/eks/api/v1beta1/eksconfig_types.go | 11 +-
 .../api/v1beta1/zz_generated.conversion.go | 6 +-
 .../eks/api/v1beta1/zz_generated.deepcopy.go | 4 +-
 bootstrap/eks/api/v1beta2/condition_consts.go | 7 +-
 bootstrap/eks/api/v1beta2/eksconfig_types.go | 11 +-
 .../eks/controllers/eksconfig_controller.go | 33 ++--
 .../eksconfig_controller_reconciler_test.go | 41 ++---
 .../controllers/eksconfig_controller_test.go | 11 +-
 cmd/clusterawsadm/gc/gc.go | 6 +-
 cmd/clusterawsadm/gc/gc_test.go | 27 ++-
 ...ster.x-k8s.io_awsmanagedcontrolplanes.yaml | 23 ++-
 ...8s.io_awsmanagedcontrolplanetemplates.yaml | 7 +-
 ...ne.cluster.x-k8s.io_rosacontrolplanes.yaml | 7 +-
 ...tructure.cluster.x-k8s.io_awsclusters.yaml | 24 ++-
 ....cluster.x-k8s.io_awsclustertemplates.yaml | 16 +-
 ....cluster.x-k8s.io_awsmachinetemplates.yaml | 2 +
 ...e.cluster.x-k8s.io_awsmanagedclusters.yaml | 16 +-
 ...r.x-k8s.io_awsmanagedclustertemplates.yaml | 7 +-
 ...ructure.cluster.x-k8s.io_rosaclusters.yaml | 16 +-
 controllers/awscluster_controller.go | 39 +++--
 controllers/awscluster_controller_test.go | 57 +++---
 .../awscluster_controller_unit_test.go | 14 +-
 controllers/awsmachine_controller.go | 82 ++++-----
 controllers/awsmachine_controller_test.go | 34 ++--
 .../awsmachine_controller_unit_test.go | 163 +++++++++---------
 controllers/awsmanagedcluster_controller.go | 10 +-
 controllers/helpers_test.go | 4 +-
 controllers/rosacluster_controller.go | 21 ++-
 controllers/rosacluster_controller_test.go | 40 ++---
 controllers/suite_test.go | 4 +-
 .../v1beta1/awsmanagedcontrolplane_types.go | 12 +-
 .../eks/api/v1beta1/conditions_consts.go | 17 +-
 .../api/v1beta1/zz_generated.conversion.go | 34 +++-
 .../eks/api/v1beta1/zz_generated.deepcopy.go | 6 +-
 .../v1beta2/awsmanagedcontrolplane_types.go | 12 +-
 .../eks/api/v1beta2/conditions_consts.go | 3 +-
.../eks/api/v1beta2/zz_generated.deepcopy.go | 3 +- .../awsmanagedcontrolplane_controller.go | 66 +++---- .../awsmanagedcontrolplane_controller_test.go | 14 +- controlplane/eks/controllers/helpers_test.go | 18 +- controlplane/eks/controllers/suite_test.go | 3 +- .../rosa/api/v1beta1/conditions_consts.go | 51 ++++++ .../rosa/api/v1beta2/conditions_consts.go | 3 +- .../api/v1beta2/rosacontrolplane_types.go | 9 +- .../rosacontrolplane_controller.go | 92 +++++----- .../rosacontrolplane_controller_test.go | 22 +-- controlplane/rosa/controllers/suite_test.go | 3 +- exp/api/v1beta1/awsfargateprofile_types.go | 21 ++- exp/api/v1beta1/awsmachinepool_types.go | 11 +- .../v1beta1/awsmanagedmachinepool_types.go | 21 ++- exp/api/v1beta1/conditions_consts.go | 87 ++++++++-- exp/api/v1beta1/zz_generated.conversion.go | 14 +- exp/api/v1beta1/zz_generated.deepcopy.go | 8 +- exp/api/v1beta2/awsfargateprofile_types.go | 11 +- exp/api/v1beta2/awsfargateprofile_webhook.go | 3 +- .../v1beta2/awsfargateprofile_webhook_test.go | 3 +- exp/api/v1beta2/awsmachinepool_types.go | 11 +- .../v1beta2/awsmanagedmachinepool_types.go | 11 +- exp/api/v1beta2/conditions_consts.go | 3 +- exp/api/v1beta2/rosacluster_types.go | 14 +- exp/api/v1beta2/rosamachinepool_types.go | 10 +- exp/api/v1beta2/rosanetwork_types.go | 10 +- exp/api/v1beta2/rosaroleconfig_types.go | 8 +- exp/api/v1beta2/zz_generated.deepcopy.go | 3 +- exp/controlleridentitycreator/suite_test.go | 3 +- exp/controllers/awsfargatepool_controller.go | 14 +- exp/controllers/awsmachinepool_controller.go | 43 ++--- .../awsmachinepool_controller_test.go | 25 +-- exp/controllers/awsmachinepool_machines.go | 2 +- .../awsmanagedmachinepool_controller.go | 20 ++- exp/controllers/rosamachinepool_controller.go | 39 +++-- .../rosamachinepool_controller_test.go | 46 +++-- exp/controllers/rosanetwork_controller.go | 55 +++--- .../rosanetwork_controller_test.go | 62 ++++--- exp/controllers/rosaroleconfig_controller.go | 50 +++--- .../rosaroleconfig_controller_test.go | 9 +- exp/controllers/suite_test.go | 2 +- exp/instancestate/suite_test.go | 2 +- go.mod | 2 +- go.sum | 4 +- hack/tools/third_party/conversion-gen/main.go | 1 - main.go | 2 +- pkg/cloud/endpoints/partitions.go | 81 +++++---- pkg/cloud/interfaces.go | 8 +- pkg/cloud/scope/cluster.go | 68 ++++---- pkg/cloud/scope/elb.go | 3 +- pkg/cloud/scope/fargate.go | 26 +-- pkg/cloud/scope/launchtemplate.go | 4 +- pkg/cloud/scope/machine.go | 30 ++-- pkg/cloud/scope/machine_test.go | 3 +- pkg/cloud/scope/machinepool.go | 14 +- pkg/cloud/scope/managedcontrolplane.go | 53 +++--- pkg/cloud/scope/managednodegroup.go | 33 ++-- pkg/cloud/scope/rosacontrolplane.go | 22 ++- pkg/cloud/scope/rosamachinepool.go | 20 ++- pkg/cloud/scope/rosanetwork.go | 9 +- pkg/cloud/scope/rosaroleconfig.go | 2 +- pkg/cloud/scope/session.go | 21 +-- pkg/cloud/scope/session_test.go | 3 +- pkg/cloud/scope/shared.go | 8 +- .../services/autoscaling/autoscalinggroup.go | 3 +- .../autoscaling/autoscalinggroup_test.go | 2 +- .../services/autoscaling/lifecyclehook.go | 16 +- pkg/cloud/services/ec2/bastion.go | 17 +- pkg/cloud/services/ec2/bastion_test.go | 2 +- pkg/cloud/services/ec2/helper_test.go | 5 +- pkg/cloud/services/ec2/instances.go | 22 +-- pkg/cloud/services/ec2/instances_test.go | 47 +++-- pkg/cloud/services/ec2/launchtemplate.go | 41 +++-- pkg/cloud/services/ec2/launchtemplate_test.go | 16 +- pkg/cloud/services/eks/cluster.go | 26 +-- pkg/cloud/services/eks/cluster_test.go | 2 +- pkg/cloud/services/eks/config.go | 2 +- 
pkg/cloud/services/eks/config_test.go | 2 +- pkg/cloud/services/eks/eks.go | 40 ++--- pkg/cloud/services/eks/fargate.go | 61 +++---- pkg/cloud/services/eks/nodegroup.go | 6 +- pkg/cloud/services/eks/oidc_test.go | 3 +- pkg/cloud/services/eks/roles.go | 11 +- pkg/cloud/services/elb/loadbalancer.go | 21 +-- pkg/cloud/services/elb/loadbalancer_test.go | 24 +-- pkg/cloud/services/gc/cleanup_test.go | 20 +-- pkg/cloud/services/iamauth/reconcile.go | 13 +- pkg/cloud/services/iamauth/reconcile_test.go | 25 ++- pkg/cloud/services/iamauth/suite_test.go | 2 +- .../services/instancestate/helpers_test.go | 2 +- pkg/cloud/services/network/carriergateways.go | 6 +- .../services/network/carriergateways_test.go | 2 +- .../services/network/egress_only_gateways.go | 5 +- .../network/egress_only_gateways_test.go | 2 +- pkg/cloud/services/network/eips_test.go | 2 +- pkg/cloud/services/network/gateways.go | 5 +- pkg/cloud/services/network/gateways_test.go | 2 +- pkg/cloud/services/network/natgateways.go | 23 +-- .../services/network/natgateways_test.go | 2 +- pkg/cloud/services/network/network.go | 91 +++++----- pkg/cloud/services/network/routetables.go | 5 +- .../services/network/routetables_test.go | 2 +- .../services/network/secondarycidr_test.go | 28 +-- pkg/cloud/services/network/subnets.go | 5 +- pkg/cloud/services/network/subnets_test.go | 2 +- pkg/cloud/services/network/vpc.go | 7 +- pkg/cloud/services/network/vpc_test.go | 2 +- pkg/cloud/services/s3/s3_test.go | 2 +- .../services/secretsmanager/secret_test.go | 2 +- .../services/securitygroup/securitygroups.go | 15 +- .../securitygroup/securitygroups_test.go | 2 +- pkg/cloud/services/ssm/secret_test.go | 2 +- pkg/utils/utils.go | 2 +- test/e2e/shared/common.go | 2 +- .../suites/managed/control_plane_helpers.go | 2 +- test/e2e/suites/managed/machine_deployment.go | 2 +- .../managed/machine_deployment_helpers.go | 2 +- test/e2e/suites/managed/managed_suite_test.go | 2 +- test/e2e/suites/unmanaged/helpers_test.go | 6 +- .../unmanaged_classic_elb_upgrade_test.go | 2 +- .../unmanaged/unmanaged_functional_test.go | 6 +- test/helpers/envtest.go | 2 +- util/conditions/helper.go | 13 +- util/paused/paused.go | 40 +++-- util/paused/paused_test.go | 5 +- 183 files changed, 1755 insertions(+), 1371 deletions(-) create mode 100644 controlplane/rosa/api/v1beta1/conditions_consts.go diff --git a/Makefile b/Makefile index 6d1668596c..420568678a 100644 --- a/Makefile +++ b/Makefile @@ -204,7 +204,7 @@ endif .PHONY: defaulters defaulters: $(DEFAULTER_GEN) ## Generate all Go types $(DEFAULTER_GEN) \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --v=0 \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ --output-file=zz_generated.defaults.go \ @@ -262,7 +262,7 @@ generate-go-apis: ## Alias for .build/generate-go-apis $(MAKE) defaulters $(CONVERSION_GEN) \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./api/v1beta1 \ @@ -270,28 +270,28 @@ generate-go-apis: ## Alias for .build/generate-go-apis $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt 
\ ./$(EXP_DIR)/api/v1beta1 $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./bootstrap/eks/api/v1beta1 $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./controlplane/eks/api/v1beta1 $(CONVERSION_GEN) \ --extra-peer-dirs=sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1 \ - --extra-peer-dirs=sigs.k8s.io/cluster-api/api/v1beta1 \ + --extra-peer-dirs=sigs.k8s.io/cluster-api/api/core/v1beta2 \ --output-file=zz_generated.conversion.go \ --go-header-file=./hack/boilerplate/boilerplate.generatego.txt \ ./controlplane/rosa/api/v1beta2 diff --git a/api/v1beta1/awscluster_types.go b/api/v1beta1/awscluster_types.go index ea0fe220a0..24e3e37116 100644 --- a/api/v1beta1/awscluster_types.go +++ b/api/v1beta1/awscluster_types.go @@ -19,7 +19,8 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( @@ -200,11 +201,11 @@ type AWSLoadBalancerSpec struct { // AWSClusterStatus defines the observed state of AWSCluster. type AWSClusterStatus struct { // +kubebuilder:default=false - Ready bool `json:"ready"` - Network NetworkStatus `json:"networkStatus,omitempty"` - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` - Bastion *Instance `json:"bastion,omitempty"` - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Ready bool `json:"ready"` + Network NetworkStatus `json:"networkStatus,omitempty"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` + Bastion *Instance `json:"bastion,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition. @@ -254,12 +255,12 @@ type AWSClusterList struct { } // GetConditions returns the observations of the operational state of the AWSCluster resource. -func (r *AWSCluster) GetConditions() clusterv1.Conditions { +func (r *AWSCluster) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1.Conditions. -func (r *AWSCluster) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1beta1.Conditions. 
+func (r *AWSCluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta1/awsclustertemplate_types.go b/api/v1beta1/awsclustertemplate_types.go index 7fa11075e7..75139795ae 100644 --- a/api/v1beta1/awsclustertemplate_types.go +++ b/api/v1beta1/awsclustertemplate_types.go @@ -19,7 +19,8 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) // AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. type AWSClusterTemplateSpec struct { diff --git a/api/v1beta1/awsmachine_types.go b/api/v1beta1/awsmachine_types.go index 0a51b360cb..d6bf89d1ea 100644 --- a/api/v1beta1/awsmachine_types.go +++ b/api/v1beta1/awsmachine_types.go @@ -19,7 +19,8 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) const ( // MachineFinalizer allows ReconcileAWSMachine to clean up AWS resources associated with AWSMachine before @@ -206,7 +207,7 @@ type AWSMachineStatus struct { Interruptible bool `json:"interruptible,omitempty"` // Addresses contains the AWS instance associated addresses. - Addresses []clusterv1.MachineAddress `json:"addresses,omitempty"` + Addresses []clusterv1beta1.MachineAddress `json:"addresses,omitempty"` // InstanceState is the state of the AWS instance for this machine. // +optional @@ -252,7 +253,7 @@ type AWSMachineStatus struct { // Conditions defines current service state of the AWSMachine. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -275,12 +276,12 @@ type AWSMachine struct { } // GetConditions returns the observations of the operational state of the AWSMachine resource. -func (r *AWSMachine) GetConditions() clusterv1.Conditions { +func (r *AWSMachine) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSMachine to the predescribed clusterv1.Conditions. -func (r *AWSMachine) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSMachine to the predescribed clusterv1beta1.Conditions. +func (r *AWSMachine) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta1/awsmachinetemplate_types.go b/api/v1beta1/awsmachinetemplate_types.go index 7ee1de3918..05d75ef554 100644 --- a/api/v1beta1/awsmachinetemplate_types.go +++ b/api/v1beta1/awsmachinetemplate_types.go @@ -20,7 +20,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) // AWSMachineTemplateStatus defines a status for an AWSMachineTemplate. type AWSMachineTemplateStatus struct { diff --git a/api/v1beta1/conditions_consts.go b/api/v1beta1/conditions_consts.go index e10c31ce72..ddbd9b41f0 100644 --- a/api/v1beta1/conditions_consts.go +++ b/api/v1beta1/conditions_consts.go @@ -16,18 +16,19 @@ limitations under the License. 
package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + const ( // PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully. // A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role. - PrincipalCredentialRetrievedCondition clusterv1.ConditionType = "PrincipalCredentialRetrieved" + PrincipalCredentialRetrievedCondition clusterv1beta1.ConditionType = "PrincipalCredentialRetrieved" // PrincipalCredentialRetrievalFailedReason used when errors occur during identity credential retrieval. PrincipalCredentialRetrievalFailedReason = "PrincipalCredentialRetrievalFailed" // CredentialProviderBuildFailedReason used when errors occur during building providers before trying credential retrieval. //nolint:gosec CredentialProviderBuildFailedReason = "CredentialProviderBuildFailed" // PrincipalUsageAllowedCondition reports on whether Principal and all the nested source identities are allowed to be used in the AWSCluster namespace. - PrincipalUsageAllowedCondition clusterv1.ConditionType = "PrincipalUsageAllowed" + PrincipalUsageAllowedCondition clusterv1beta1.ConditionType = "PrincipalUsageAllowed" // PrincipalUsageUnauthorizedReason used when AWSCluster namespace is not in the identity's allowed namespaces list. PrincipalUsageUnauthorizedReason = "PrincipalUsageUnauthorized" // SourcePrincipalUsageUnauthorizedReason used when AWSCluster is not in the intersection of source identity allowed namespaces @@ -37,7 +38,7 @@ const ( const ( // VpcReadyCondition reports on the successful reconciliation of a VPC. - VpcReadyCondition clusterv1.ConditionType = "VpcReady" + VpcReadyCondition clusterv1beta1.ConditionType = "VpcReady" // VpcCreationStartedReason used when attempting to create a VPC for a managed cluster. // Will not be applied to unmanaged clusters. VpcCreationStartedReason = "VpcCreationStarted" @@ -47,7 +48,7 @@ const ( const ( // SubnetsReadyCondition reports on the successful reconciliation of subnets. - SubnetsReadyCondition clusterv1.ConditionType = "SubnetsReady" + SubnetsReadyCondition clusterv1beta1.ConditionType = "SubnetsReady" // SubnetsReconciliationFailedReason used to report failures while reconciling subnets. SubnetsReconciliationFailedReason = "SubnetsReconciliationFailed" ) @@ -55,7 +56,7 @@ const ( const ( // InternetGatewayReadyCondition reports on the successful reconciliation of internet gateways. // Only applicable to managed clusters. - InternetGatewayReadyCondition clusterv1.ConditionType = "InternetGatewayReady" + InternetGatewayReadyCondition clusterv1beta1.ConditionType = "InternetGatewayReady" // InternetGatewayFailedReason used when errors occur during internet gateway reconciliation. InternetGatewayFailedReason = "InternetGatewayFailed" ) @@ -63,15 +64,23 @@ const ( const ( // EgressOnlyInternetGatewayReadyCondition reports on the successful reconciliation of egress only internet gateways. // Only applicable to managed clusters. - EgressOnlyInternetGatewayReadyCondition clusterv1.ConditionType = "EgressOnlyInternetGatewayReady" + EgressOnlyInternetGatewayReadyCondition clusterv1beta1.ConditionType = "EgressOnlyInternetGatewayReady" // EgressOnlyInternetGatewayFailedReason used when errors occur during egress only internet gateway reconciliation. 
EgressOnlyInternetGatewayFailedReason = "EgressOnlyInternetGatewayFailed" ) +const ( + // CarrierGatewayReadyCondition reports on the successful reconciliation of carrier gateways. + // Only applicable to managed clusters. + CarrierGatewayReadyCondition clusterv1beta1.ConditionType = "CarrierGatewayReady" + // CarrierGatewayFailedReason used when errors occur during carrier gateway reconciliation. + CarrierGatewayFailedReason = "CarrierGatewayFailed" +) + const ( // NatGatewaysReadyCondition reports successful reconciliation of NAT gateways. // Only applicable to managed clusters. - NatGatewaysReadyCondition clusterv1.ConditionType = "NatGatewaysReady" + NatGatewaysReadyCondition clusterv1beta1.ConditionType = "NatGatewaysReady" // NatGatewaysCreationStartedReason set once when creating new NAT gateways. NatGatewaysCreationStartedReason = "NatGatewaysCreationStarted" // NatGatewaysReconciliationFailedReason used when any errors occur during reconciliation of NAT gateways. @@ -81,22 +90,30 @@ const ( const ( // RouteTablesReadyCondition reports successful reconciliation of route tables. // Only applicable to managed clusters. - RouteTablesReadyCondition clusterv1.ConditionType = "RouteTablesReady" + RouteTablesReadyCondition clusterv1beta1.ConditionType = "RouteTablesReady" // RouteTableReconciliationFailedReason used when any errors occur during reconciliation of route tables. RouteTableReconciliationFailedReason = "RouteTableReconciliationFailed" ) +const ( + // VpcEndpointsReadyCondition reports successful reconciliation of vpc endpoints. + // Only applicable to managed clusters. + VpcEndpointsReadyCondition clusterv1beta1.ConditionType = "VpcEndpointsReadyCondition" + // VpcEndpointsReconciliationFailedReason used when any errors occur during reconciliation of vpc endpoints. + VpcEndpointsReconciliationFailedReason = "VpcEndpointsReconciliationFailed" +) + const ( // SecondaryCidrsReadyCondition reports successful reconciliation of secondary CIDR blocks. // Only applicable to managed clusters. - SecondaryCidrsReadyCondition clusterv1.ConditionType = "SecondaryCidrsReady" + SecondaryCidrsReadyCondition clusterv1beta1.ConditionType = "SecondaryCidrsReady" // SecondaryCidrReconciliationFailedReason used when any errors occur during reconciliation of secondary CIDR blocks. SecondaryCidrReconciliationFailedReason = "SecondaryCidrReconciliationFailed" ) const ( // ClusterSecurityGroupsReadyCondition reports successful reconciliation of security groups. - ClusterSecurityGroupsReadyCondition clusterv1.ConditionType = "ClusterSecurityGroupsReady" + ClusterSecurityGroupsReadyCondition clusterv1beta1.ConditionType = "ClusterSecurityGroupsReady" // ClusterSecurityGroupReconciliationFailedReason used when any errors occur during reconciliation of security groups. ClusterSecurityGroupReconciliationFailedReason = "SecurityGroupReconciliationFailed" ) @@ -104,7 +121,7 @@ const ( const ( // BastionHostReadyCondition reports whether a bastion host is ready. Depending on the configuration, a cluster // may not require a bastion host and this condition will be skipped. - BastionHostReadyCondition clusterv1.ConditionType = "BastionHostReady" + BastionHostReadyCondition clusterv1beta1.ConditionType = "BastionHostReady" // BastionCreationStartedReason used when creating a new bastion host. BastionCreationStartedReason = "BastionCreationStarted" // BastionHostFailedReason used when an error occurs during the creation of a bastion host. 
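[Note: the rename in the hunks above and below repeats for every condition constant in this file. Condition machinery deliberately stays on the CAPI v1beta1 Conditions API, so the import alias changes from clusterv1 to clusterv1beta1, freeing clusterv1 for core/v1beta2 elsewhere. A minimal sketch of the resulting file shape; the ExampleReady identifiers are hypothetical and shown only to illustrate the convention:

package v1beta1

import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"

const (
	// ExampleReadyCondition is a hypothetical condition type; like the real
	// constants in this file, it keeps using the v1beta1 Conditions API.
	ExampleReadyCondition clusterv1beta1.ConditionType = "ExampleReady"
	// ExampleReconciliationFailedReason is the matching failure reason string.
	ExampleReconciliationFailedReason = "ExampleReconciliationFailed"
)
]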
@@ -113,9 +130,12 @@ const ( const ( // LoadBalancerReadyCondition reports on whether a control plane load balancer was successfully reconciled. - LoadBalancerReadyCondition clusterv1.ConditionType = "LoadBalancerReady" + LoadBalancerReadyCondition clusterv1beta1.ConditionType = "LoadBalancerReady" // WaitForDNSNameReason used while waiting for a DNS name for the API server to be populated. WaitForDNSNameReason = "WaitForDNSName" + // WaitForExternalControlPlaneEndpointReason is available when the AWS Cluster is waiting for an externally managed + // Load Balancer, such as an external Control Plane provider. + WaitForExternalControlPlaneEndpointReason = "WaitForExternalControlPlaneEndpoint" // WaitForDNSNameResolveReason used while waiting for DNS name to resolve. WaitForDNSNameResolveReason = "WaitForDNSNameResolve" // LoadBalancerFailedReason used when an error occurs during load balancer reconciliation. @@ -124,7 +144,7 @@ const ( const ( // InstanceReadyCondition reports on current status of the EC2 instance. Ready indicates the instance is in a Running state. - InstanceReadyCondition clusterv1.ConditionType = "InstanceReady" + InstanceReadyCondition clusterv1beta1.ConditionType = "InstanceReady" // InstanceNotFoundReason used when the instance couldn't be retrieved. InstanceNotFoundReason = "InstanceNotFound" @@ -146,7 +166,7 @@ const ( const ( // SecurityGroupsReadyCondition indicates the security groups are up to date on the AWSMachine. - SecurityGroupsReadyCondition clusterv1.ConditionType = "SecurityGroupsReady" + SecurityGroupsReadyCondition clusterv1beta1.ConditionType = "SecurityGroupsReady" // SecurityGroupsFailedReason used when the security groups could not be synced. SecurityGroupsFailedReason = "SecurityGroupsSyncFailed" @@ -157,7 +177,7 @@ const ( // When set to false, severity can be an Error if the subnet is not found or unavailable in the instance's AZ. // Note this is only applicable to control plane machines. // Only applicable to control plane machines. - ELBAttachedCondition clusterv1.ConditionType = "ELBAttached" + ELBAttachedCondition clusterv1beta1.ConditionType = "ELBAttached" // ELBAttachFailedReason used when a control plane node fails to attach to the ELB. ELBAttachFailedReason = "ELBAttachFailed" @@ -167,7 +187,7 @@ const ( const ( // S3BucketReadyCondition indicates an S3 bucket has been created successfully. - S3BucketReadyCondition clusterv1.ConditionType = "S3BucketCreated" + S3BucketReadyCondition clusterv1beta1.ConditionType = "S3BucketCreated" // S3BucketFailedReason is used when any errors occur during reconciliation of an S3 bucket. S3BucketFailedReason = "S3BucketCreationFailed" diff --git a/api/v1beta1/tags.go b/api/v1beta1/tags.go index 877ef85d5c..1d711937a0 100644 --- a/api/v1beta1/tags.go +++ b/api/v1beta1/tags.go @@ -24,7 +24,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) // Tags defines a map of tags. type Tags map[string]string diff --git a/api/v1beta1/types.go b/api/v1beta1/types.go index 53d94def4a..74883da886 100644 --- a/api/v1beta1/types.go +++ b/api/v1beta1/types.go @@ -19,7 +19,8 @@ package v1beta1 import ( "k8s.io/apimachinery/pkg/util/sets" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) // AWSResourceReference is a reference to a specific AWS resource by ID or filters. 
// Only one of ID or Filters may be specified. Specifying more than one will result in diff --git a/api/v1beta1/zz_generated.conversion.go b/api/v1beta1/zz_generated.conversion.go index eb665466ec..cf6c9f82ba 100644 --- a/api/v1beta1/zz_generated.conversion.go +++ b/api/v1beta1/zz_generated.conversion.go @@ -29,7 +29,8 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func init() { @@ -1036,7 +1037,19 @@ func autoConvert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(in *AWSClu if err := Convert_v1beta1_NetworkStatus_To_v1beta2_NetworkStatus(&in.Network, &out.Network, s); err != nil { return err } - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make(map[string]corev1beta2.FailureDomain, len(*in)) + for key, val := range *in { + (*out)[key] = corev1beta2.FailureDomain{ + Name: key, + ControlPlane: &val.ControlPlane, + Attributes: val.Attributes, + } + } + } else { + out.FailureDomains = nil + } if in.Bastion != nil { in, out := &in.Bastion, &out.Bastion *out = new(v1beta2.Instance) @@ -1046,7 +1059,7 @@ func autoConvert_v1beta1_AWSClusterStatus_To_v1beta2_AWSClusterStatus(in *AWSClu } else { out.Bastion = nil } - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1060,7 +1073,18 @@ func autoConvert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *v1beta if err := Convert_v1beta2_NetworkStatus_To_v1beta1_NetworkStatus(&in.Network, &out.Network, s); err != nil { return err } - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make(corev1beta1.FailureDomains, len(*in)) + for key, val := range *in { + (*out)[key] = corev1beta1.FailureDomainSpec{ + ControlPlane: *val.ControlPlane, + Attributes: val.Attributes, + } + } + } else { + out.FailureDomains = nil + } if in.Bastion != nil { in, out := &in.Bastion, &out.Bastion *out = new(Instance) @@ -1070,7 +1094,7 @@ func autoConvert_v1beta2_AWSClusterStatus_To_v1beta1_AWSClusterStatus(in *v1beta } else { out.Bastion = nil } - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1459,11 +1483,11 @@ func autoConvert_v1beta2_AWSMachineSpec_To_v1beta1_AWSMachineSpec(in *v1beta2.AW func autoConvert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(in *AWSMachineStatus, out *v1beta2.AWSMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.Interruptible = in.Interruptible - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta2.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.InstanceState = (*v1beta2.InstanceState)(unsafe.Pointer(in.InstanceState)) out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = 
*(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -1475,11 +1499,11 @@ func Convert_v1beta1_AWSMachineStatus_To_v1beta2_AWSMachineStatus(in *AWSMachine func autoConvert_v1beta2_AWSMachineStatus_To_v1beta1_AWSMachineStatus(in *v1beta2.AWSMachineStatus, out *AWSMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.Interruptible = in.Interruptible - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.InstanceState = (*InstanceState)(unsafe.Pointer(in.InstanceState)) out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -2009,7 +2033,7 @@ func autoConvert_v1beta1_Instance_To_v1beta2_Instance(in *Instance, out *v1beta2 out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs)) out.UserData = (*string)(unsafe.Pointer(in.UserData)) out.IAMProfile = in.IAMProfile - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta2.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP)) out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP)) out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport)) @@ -2040,7 +2064,7 @@ func autoConvert_v1beta2_Instance_To_v1beta1_Instance(in *v1beta2.Instance, out out.SecurityGroupIDs = *(*[]string)(unsafe.Pointer(&in.SecurityGroupIDs)) out.UserData = (*string)(unsafe.Pointer(in.UserData)) out.IAMProfile = in.IAMProfile - out.Addresses = *(*[]apiv1beta1.MachineAddress)(unsafe.Pointer(&in.Addresses)) + out.Addresses = *(*[]corev1beta2.MachineAddress)(unsafe.Pointer(&in.Addresses)) out.PrivateIP = (*string)(unsafe.Pointer(in.PrivateIP)) out.PublicIP = (*string)(unsafe.Pointer(in.PublicIP)) out.ENASupport = (*bool)(unsafe.Pointer(in.ENASupport)) diff --git a/api/v1beta1/zz_generated.deepcopy.go b/api/v1beta1/zz_generated.deepcopy.go index 9566fb53a4..f25093d8fe 100644 --- a/api/v1beta1/zz_generated.deepcopy.go +++ b/api/v1beta1/zz_generated.deepcopy.go @@ -23,7 +23,8 @@ package v1beta1 import ( "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
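[Note on the FailureDomains change driving most of the manual conversion work above: v1beta2 replaces the FailureDomains map of FailureDomainSpec with map[string]FailureDomain, whose values carry their own Name and a *bool ControlPlane. A self-contained sketch of the round trip follows; the package and function names are illustrative only, and the v1beta2-to-v1beta1 direction dereferences ControlPlane unconditionally, so, like the generated code above, it assumes the pointer is always set:

package conversionsketch

import (
	corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
	corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2"
)

// toV1Beta2 mirrors the hand-written block in the AWSClusterStatus conversion:
// each map value gains an explicit Name, and ControlPlane becomes a pointer.
func toV1Beta2(in corev1beta1.FailureDomains) map[string]corev1beta2.FailureDomain {
	if in == nil {
		return nil
	}
	out := make(map[string]corev1beta2.FailureDomain, len(in))
	for key, val := range in {
		cp := val.ControlPlane // copy first so the pointer does not alias the loop variable
		out[key] = corev1beta2.FailureDomain{
			Name:         key,
			ControlPlane: &cp,
			Attributes:   val.Attributes,
		}
	}
	return out
}

// toV1Beta1 is the reverse path; dereferencing ControlPlane here would panic
// if a caller ever left it nil.
func toV1Beta1(in map[string]corev1beta2.FailureDomain) corev1beta1.FailureDomains {
	if in == nil {
		return nil
	}
	out := make(corev1beta1.FailureDomains, len(in))
	for key, val := range in {
		out[key] = corev1beta1.FailureDomainSpec{
			ControlPlane: *val.ControlPlane,
			Attributes:   val.Attributes,
		}
	}
	return out
}
]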
@@ -409,7 +410,7 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) { in.Network.DeepCopyInto(&out.Network) if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(apiv1beta1.FailureDomains, len(*in)) + *out = make(corev1beta1.FailureDomains, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -421,7 +422,7 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -741,7 +742,7 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { *out = *in if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]apiv1beta1.MachineAddress, len(*in)) + *out = make([]corev1beta1.MachineAddress, len(*in)) copy(*out, *in) } if in.InstanceState != nil { @@ -761,7 +762,7 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -1305,7 +1306,7 @@ func (in *Instance) DeepCopyInto(out *Instance) { } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]apiv1beta1.MachineAddress, len(*in)) + *out = make([]v1beta2.MachineAddress, len(*in)) copy(*out, *in) } if in.PrivateIP != nil { diff --git a/api/v1beta2/awscluster_types.go b/api/v1beta2/awscluster_types.go index fc0f1f91f2..37b6c72a6f 100644 --- a/api/v1beta2/awscluster_types.go +++ b/api/v1beta2/awscluster_types.go @@ -19,7 +19,9 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) const ( // ClusterFinalizer allows ReconcileAWSCluster to clean up AWS resources associated with AWSCluster before @@ -275,11 +277,11 @@ type AdditionalListenerSpec struct { // AWSClusterStatus defines the observed state of AWSCluster. type AWSClusterStatus struct { // +kubebuilder:default=false - Ready bool `json:"ready"` - Network NetworkStatus `json:"networkStatus,omitempty"` - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` - Bastion *Instance `json:"bastion,omitempty"` - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Ready bool `json:"ready"` + Network NetworkStatus `json:"networkStatus,omitempty"` + FailureDomains map[string]clusterv1.FailureDomain `json:"failureDomains,omitempty"` + Bastion *Instance `json:"bastion,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // S3Bucket defines a supporting S3 bucket for the cluster, currently can be optionally used for Ignition. @@ -345,12 +347,12 @@ type AWSClusterList struct { } // GetConditions returns the observations of the operational state of the AWSCluster resource. -func (r *AWSCluster) GetConditions() clusterv1.Conditions { +func (r *AWSCluster) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1.Conditions. 
-func (r *AWSCluster) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSCluster to the predescribed clusterv1beta1.Conditions. +func (r *AWSCluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/awscluster_webhook.go b/api/v1beta2/awscluster_webhook.go index 525e3157c3..4a2138e068 100644 --- a/api/v1beta2/awscluster_webhook.go +++ b/api/v1beta2/awscluster_webhook.go @@ -30,7 +30,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/annotations" ) diff --git a/api/v1beta2/awscluster_webhook_test.go b/api/v1beta2/awscluster_webhook_test.go index 04f2e89cdd..2c975dda08 100644 --- a/api/v1beta2/awscluster_webhook_test.go +++ b/api/v1beta2/awscluster_webhook_test.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/util/defaulting" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestAWSClusterDefault(t *testing.T) { diff --git a/api/v1beta2/awsclustertemplate_types.go b/api/v1beta2/awsclustertemplate_types.go index 9501cde7b4..98d207c0f4 100644 --- a/api/v1beta2/awsclustertemplate_types.go +++ b/api/v1beta2/awsclustertemplate_types.go @@ -19,7 +19,8 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) // AWSClusterTemplateSpec defines the desired state of AWSClusterTemplate. type AWSClusterTemplateSpec struct { diff --git a/api/v1beta2/awsmachine_types.go b/api/v1beta2/awsmachine_types.go index 3c756c15e7..db092f74d7 100644 --- a/api/v1beta2/awsmachine_types.go +++ b/api/v1beta2/awsmachine_types.go @@ -19,7 +19,9 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) const ( // MachineFinalizer allows ReconcileAWSMachine to clean up AWS resources associated with AWSMachine before @@ -436,7 +438,7 @@ type AWSMachineStatus struct { // Conditions defines current service state of the AWSMachine. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -460,12 +462,12 @@ type AWSMachine struct { } // GetConditions returns the observations of the operational state of the AWSMachine resource. -func (r *AWSMachine) GetConditions() clusterv1.Conditions { +func (r *AWSMachine) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSMachine to the predescribed clusterv1.Conditions. -func (r *AWSMachine) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSMachine to the predescribed clusterv1beta1.Conditions. 
+func (r *AWSMachine) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/awsmachinetemplate_types.go b/api/v1beta2/awsmachinetemplate_types.go index 47a2b018b2..8e7de6af5b 100644 --- a/api/v1beta2/awsmachinetemplate_types.go +++ b/api/v1beta2/awsmachinetemplate_types.go @@ -20,7 +20,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) // AWSMachineTemplateStatus defines a status for an AWSMachineTemplate. type AWSMachineTemplateStatus struct { diff --git a/api/v1beta2/awsmanagedcluster_types.go b/api/v1beta2/awsmanagedcluster_types.go index d9526ad86f..76da34d783 100644 --- a/api/v1beta2/awsmanagedcluster_types.go +++ b/api/v1beta2/awsmanagedcluster_types.go @@ -19,7 +19,9 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) // AWSManagedClusterSpec defines the desired state of AWSManagedCluster type AWSManagedClusterSpec struct { @@ -36,11 +38,11 @@ type AWSManagedClusterStatus struct { // FailureDomains specifies a list fo available availability zones that can be used // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains map[string]clusterv1.FailureDomain `json:"failureDomains,omitempty"` // Conditions defines current service state of the AWSManagedCluster. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -75,12 +77,12 @@ func init() { // GetConditions returns the observations of the operational state of the // AWSManagedCluster resource. -func (r *AWSManagedCluster) GetConditions() clusterv1.Conditions { +func (r *AWSManagedCluster) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the underlying service state of the AWSManagedCluster to -// the predescribed clusterv1.Conditions. -func (r *AWSManagedCluster) SetConditions(conditions clusterv1.Conditions) { +// the predescribed clusterv1beta1.Conditions. +func (r *AWSManagedCluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/api/v1beta2/conditions_consts.go b/api/v1beta2/conditions_consts.go index 11065009f8..a552158275 100644 --- a/api/v1beta2/conditions_consts.go +++ b/api/v1beta2/conditions_consts.go @@ -16,7 +16,8 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + const ( // PrincipalCredentialRetrievedCondition reports on whether Principal credentials could be retrieved successfully. // A possible scenario, where retrieval is unsuccessful, is when SourcePrincipal is not authorized for assume role. diff --git a/api/v1beta2/tags.go b/api/v1beta2/tags.go index ad04f2f3fb..344434f4a5 100644 --- a/api/v1beta2/tags.go +++ b/api/v1beta2/tags.go @@ -23,7 +23,8 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/validation/field" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) // Tags defines a map of tags. 
type Tags map[string]string diff --git a/api/v1beta2/types.go b/api/v1beta2/types.go index f6007356dd..4d173ef62e 100644 --- a/api/v1beta2/types.go +++ b/api/v1beta2/types.go @@ -21,7 +21,8 @@ import ( "k8s.io/apimachinery/pkg/util/sets" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) const ( // PreventDeletionLabel can be used in situations where preventing delation is allowed. The docs diff --git a/api/v1beta2/zz_generated.deepcopy.go b/api/v1beta2/zz_generated.deepcopy.go index e74c4b29eb..8562282419 100644 --- a/api/v1beta2/zz_generated.deepcopy.go +++ b/api/v1beta2/zz_generated.deepcopy.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -415,7 +416,7 @@ func (in *AWSClusterStatus) DeepCopyInto(out *AWSClusterStatus) { in.Network.DeepCopyInto(&out.Network) if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(v1beta1.FailureDomains, len(*in)) + *out = make(map[string]corev1beta2.FailureDomain, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -799,7 +800,7 @@ func (in *AWSMachineStatus) DeepCopyInto(out *AWSMachineStatus) { *out = *in if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]v1beta1.MachineAddress, len(*in)) + *out = make([]corev1beta2.MachineAddress, len(*in)) copy(*out, *in) } if in.InstanceState != nil { @@ -1030,7 +1031,7 @@ func (in *AWSManagedClusterStatus) DeepCopyInto(out *AWSManagedClusterStatus) { *out = *in if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(v1beta1.FailureDomains, len(*in)) + *out = make(map[string]corev1beta2.FailureDomain, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -1649,7 +1650,7 @@ func (in *Instance) DeepCopyInto(out *Instance) { } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]v1beta1.MachineAddress, len(*in)) + *out = make([]corev1beta2.MachineAddress, len(*in)) copy(*out, *in) } if in.PrivateIP != nil { diff --git a/bootstrap/eks/api/v1beta1/condition_consts.go b/bootstrap/eks/api/v1beta1/condition_consts.go index c91412e9f9..04dfc80d00 100644 --- a/bootstrap/eks/api/v1beta1/condition_consts.go +++ b/bootstrap/eks/api/v1beta1/condition_consts.go @@ -16,7 +16,8 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + // Conditions and condition Reasons for the EKSConfig object // FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1beta1/condition_consts.go diff --git a/bootstrap/eks/api/v1beta1/eksconfig_types.go b/bootstrap/eks/api/v1beta1/eksconfig_types.go index ecd3a9167a..8380ce1d7a 100644 --- a/bootstrap/eks/api/v1beta1/eksconfig_types.go +++ b/bootstrap/eks/api/v1beta1/eksconfig_types.go @@ -19,7 +19,8 @@ package v1beta1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) // EKSConfigSpec defines the desired state of Amazon EKS Bootstrap Configuration. 
type EKSConfigSpec struct { @@ -83,7 +84,7 @@ type EKSConfigStatus struct { // Conditions defines current service state of the EKSConfig. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -103,12 +104,12 @@ type EKSConfig struct { } // GetConditions returns the observations of the operational state of the EKSConfig resource. -func (r *EKSConfig) GetConditions() clusterv1.Conditions { +func (r *EKSConfig) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1.Conditions. -func (r *EKSConfig) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1beta1.Conditions. +func (r *EKSConfig) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/bootstrap/eks/api/v1beta1/zz_generated.conversion.go b/bootstrap/eks/api/v1beta1/zz_generated.conversion.go index aa2b37b195..6f9af363a2 100644 --- a/bootstrap/eks/api/v1beta1/zz_generated.conversion.go +++ b/bootstrap/eks/api/v1beta1/zz_generated.conversion.go @@ -27,7 +27,7 @@ import ( conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { @@ -251,7 +251,7 @@ func autoConvert_v1beta1_EKSConfigStatus_To_v1beta2_EKSConfigStatus(in *EKSConfi out.FailureReason = in.FailureReason out.FailureMessage = in.FailureMessage out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -266,7 +266,7 @@ func autoConvert_v1beta2_EKSConfigStatus_To_v1beta1_EKSConfigStatus(in *v1beta2. out.FailureReason = in.FailureReason out.FailureMessage = in.FailureMessage out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } diff --git a/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go b/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go index 756cbbe029..b09343ed28 100644 --- a/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go +++ b/bootstrap/eks/api/v1beta1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" - apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
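[Note: unlike FailureDomains, the Conditions fields convert with a single cast, as in the *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) lines above. Both API versions now reference the same CAPI v1beta1 Conditions type, so the layouts are identical by construction and conversion-gen can skip the element-wise copy. A minimal stand-alone illustration of that zero-copy pattern, with toy types standing in for the real ones:

package main

import (
	"fmt"
	"unsafe"
)

// Two structurally identical slice types, standing in for the Conditions
// field as referenced from two different API packages.
type conditionsA []string
type conditionsB []string

// zeroCopy reinterprets one type as the other without copying, the way the
// generated conversions above do. This is only sound while both types keep
// exactly the same memory layout; any divergence requires a real loop.
func zeroCopy(in *conditionsA) conditionsB {
	return *(*conditionsB)(unsafe.Pointer(in))
}

func main() {
	a := conditionsA{"Ready", "Paused"}
	fmt.Println(zeroCopy(&a)) // prints [Ready Paused]
}
]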
@@ -151,7 +151,7 @@ func (in *EKSConfigStatus) DeepCopyInto(out *EKSConfigStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/bootstrap/eks/api/v1beta2/condition_consts.go b/bootstrap/eks/api/v1beta2/condition_consts.go index 0c4a81edff..6f5c49da7c 100644 --- a/bootstrap/eks/api/v1beta2/condition_consts.go +++ b/bootstrap/eks/api/v1beta2/condition_consts.go @@ -16,7 +16,10 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +import ( + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) + // Conditions and condition Reasons for the EKSConfig object // FROM: https://github.com/kubernetes-sigs/cluster-api/blob/main/bootstrap/kubeadm/api/v1beta1/condition_consts.go @@ -26,7 +29,7 @@ const ( // NOTE: When the DataSecret generation starts the process completes immediately and within the // same reconciliation, so the user will always see a transition from Wait to Generated without having // evidence that BootstrapSecret generation is started/in progress. - DataSecretAvailableCondition clusterv1.ConditionType = "DataSecretAvailable" + DataSecretAvailableCondition clusterv1beta1.ConditionType = "DataSecretAvailable" // DataSecretGenerationFailedReason (Severity=Warning) documents a EKSConfig controller detecting // an error while generating a data secret; those kind of errors are usually due to misconfigurations diff --git a/bootstrap/eks/api/v1beta2/eksconfig_types.go b/bootstrap/eks/api/v1beta2/eksconfig_types.go index dfcb014aa8..1f7905fc1e 100644 --- a/bootstrap/eks/api/v1beta2/eksconfig_types.go +++ b/bootstrap/eks/api/v1beta2/eksconfig_types.go @@ -19,7 +19,8 @@ package v1beta2 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) // EKSConfigSpec defines the desired state of Amazon EKS Bootstrap Configuration. type EKSConfigSpec struct { @@ -106,7 +107,7 @@ type EKSConfigStatus struct { // Conditions defines current service state of the EKSConfig. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // Encoding specifies the cloud-init file encoding. @@ -323,12 +324,12 @@ type EKSConfig struct { } // GetConditions returns the observations of the operational state of the EKSConfig resource. -func (r *EKSConfig) GetConditions() clusterv1.Conditions { +func (r *EKSConfig) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1.Conditions. -func (r *EKSConfig) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the EKSConfig to the predescribed clusterv1beta1.Conditions. 
+func (r *EKSConfig) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/bootstrap/eks/controllers/eksconfig_controller.go b/bootstrap/eks/controllers/eksconfig_controller.go index 11351a0c72..b9a4a800e1 100644 --- a/bootstrap/eks/controllers/eksconfig_controller.go +++ b/bootstrap/eks/controllers/eksconfig_controller.go @@ -25,6 +25,7 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -41,7 +42,8 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" bsutil "sigs.k8s.io/cluster-api/bootstrap/util" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util" @@ -201,27 +203,27 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1 } } - if cluster.Spec.ControlPlaneRef == nil || cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" { + if cluster.Spec.ControlPlaneRef.Name == "" || cluster.Spec.ControlPlaneRef.Kind != "AWSManagedControlPlane" { return errors.New("Cluster's controlPlaneRef needs to be an AWSManagedControlPlane in order to use the EKS bootstrap provider") } - if !cluster.Status.InfrastructureReady { + if !meta.IsStatusConditionTrue(cluster.GetConditions(), string(clusterv1beta1.InfrastructureReadyCondition)) { log.Info("Cluster infrastructure is not ready") conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForClusterInfrastructureReason, - clusterv1.ConditionSeverityInfo, "") + clusterv1beta1.ConditionSeverityInfo, "") return nil } - if !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + if !meta.IsStatusConditionTrue(cluster.GetConditions(), string(clusterv1beta1.ControlPlaneInitializedCondition)) { log.Info("Control Plane has not yet been initialized") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} - if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Spec.ControlPlaneRef.Namespace}, controlPlane); err != nil { + if err := r.Get(ctx, client.ObjectKey{Name: cluster.Spec.ControlPlaneRef.Name, Namespace: cluster.Namespace}, controlPlane); err != nil { return err } @@ -229,7 +231,7 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1 files, err := r.resolveFiles(ctx, config) if err != nil { log.Info("Failed to resolve files for user data") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, 
clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -274,14 +276,14 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1 userDataScript, err := userdata.NewNode(nodeInput) if err != nil { log.Error(err, "Failed to create a worker join configuration") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "") return err } // store userdata as secret if err := r.storeBootstrapData(ctx, cluster, config, userDataScript); err != nil { log.Error(err, "Failed to store bootstrap data") - conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1.ConditionSeverityWarning, "") + conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "") return err } @@ -313,7 +315,7 @@ func (r *EKSConfigReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Man err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc((r.ClusterToEKSConfigs)), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), logger.FromContext(ctx).GetLogger())), + predicates.ClusterUnpausedAndInfrastructureProvisioned(mgr.GetScheme(), logger.FromContext(ctx).GetLogger())), ) if err != nil { return errors.Wrap(err, "failed adding watch for Clusters to controller manager") @@ -369,7 +371,7 @@ func (r *EKSConfigReconciler) MachineToBootstrapMapFunc(_ context.Context, o cli if !ok { klog.Errorf("Expected a Machine but got a %T", o) } - if m.Spec.Bootstrap.ConfigRef != nil && m.Spec.Bootstrap.ConfigRef.GroupVersionKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig") { + if m.Spec.Bootstrap.ConfigRef.Name != "" && m.Spec.Bootstrap.ConfigRef.APIGroup == eksbootstrapv1.GroupVersion.Group && m.Spec.Bootstrap.ConfigRef.Kind == "EKSConfig" { name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name} result = append(result, ctrl.Request{NamespacedName: name}) } @@ -386,7 +388,7 @@ func (r *EKSConfigReconciler) MachinePoolToBootstrapMapFunc(_ context.Context, o klog.Errorf("Expected a MachinePool but got a %T", o) } configRef := m.Spec.Template.Spec.Bootstrap.ConfigRef - if configRef != nil && configRef.GroupVersionKind().GroupKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig").GroupKind() { + if configRef.Name != "" && configRef.APIGroup == eksbootstrapv1.GroupVersion.Group && configRef.Kind == "EKSConfig" { name := client.ObjectKey{Namespace: m.Namespace, Name: configRef.Name} result = append(result, ctrl.Request{NamespacedName: name}) } @@ -417,8 +419,9 @@ func (r *EKSConfigReconciler) ClusterToEKSConfigs(_ context.Context, o client.Ob } for _, m := range machineList.Items { - if m.Spec.Bootstrap.ConfigRef != nil && - m.Spec.Bootstrap.ConfigRef.GroupVersionKind().GroupKind() == eksbootstrapv1.GroupVersion.WithKind("EKSConfig").GroupKind() { + if m.Spec.Bootstrap.ConfigRef.Name != "" && + m.Spec.Bootstrap.ConfigRef.APIGroup == eksbootstrapv1.GroupVersion.Group && + m.Spec.Bootstrap.ConfigRef.Kind == "EKSConfig" { name := client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.Bootstrap.ConfigRef.Name} result = append(result, 
ctrl.Request{NamespacedName: name}) } diff --git a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go index 3f5bfd1b10..1ae2122b2c 100644 --- a/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go +++ b/bootstrap/eks/controllers/eksconfig_controller_reconciler_test.go @@ -24,15 +24,15 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/yaml" eksbootstrapv1 "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/bootstrap/eks/internal/userdata" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" ) func TestEKSConfigReconciler(t *testing.T) { @@ -84,7 +84,7 @@ func TestEKSConfigReconciler(t *testing.T) { config.ObjectMeta.OwnerReferences = []metav1.OwnerReference{ { Kind: "MachinePool", - APIVersion: v1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), Name: mp.Name, UID: types.UID(fmt.Sprintf("%s uid", mp.Name)), }, @@ -283,17 +283,18 @@ func newCluster(name string) *clusterv1.Cluster { Name: name, }, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{ - Name: name, - Kind: "AWSManagedControlPlane", - Namespace: "default", + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Name: name, + Kind: "AWSManagedControlPlane", }, }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, } - conditions.MarkTrue(cluster, clusterv1.ControlPlaneInitializedCondition) + cluster.Status.Initialization.ControlPlaneInitialized = ptr.To(true) return cluster } @@ -316,9 +317,9 @@ func newMachine(cluster *clusterv1.Cluster, name string) *clusterv1.Machine { }, Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - Kind: "EKSConfig", - APIVersion: eksbootstrapv1.GroupVersion.String(), + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "EKSConfig", + APIGroup: eksbootstrapv1.GroupVersion.Group, }, }, }, @@ -333,24 +334,24 @@ func newMachine(cluster *clusterv1.Cluster, name string) *clusterv1.Machine { } // newMachinePool returns a CAPI machine object; if cluster is not nil, the MachinePool is linked to the cluster as well. 
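(For context on the reference changes above: v1beta2 replaces *corev1.ObjectReference with the value type ContractVersionedObjectReference, which carries only APIGroup, Kind, and Name. "Is it set" checks therefore become Name != "", and the namespace is always inherited from the referencing object. A minimal sketch of the matching logic these map functions now share, with a hypothetical helper name:

    // refersToEKSConfig reports whether ref points at an EKSConfig and, if so,
    // returns the key to fetch it. ns is the namespace of the referencing object,
    // since ContractVersionedObjectReference has no Namespace field.
    func refersToEKSConfig(ref clusterv1.ContractVersionedObjectReference, ns string) (client.ObjectKey, bool) {
        // A zero-value ref has an empty Name; there is no pointer to nil-check anymore.
        if ref.Name == "" || ref.APIGroup != eksbootstrapv1.GroupVersion.Group || ref.Kind != "EKSConfig" {
            return client.ObjectKey{}, false
        }
        return client.ObjectKey{Namespace: ns, Name: ref.Name}, true
    })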
-func newMachinePool(cluster *clusterv1.Cluster, name string) *v1beta1.MachinePool { +func newMachinePool(cluster *clusterv1.Cluster, name string) *clusterv1.MachinePool { generatedName := fmt.Sprintf("%s-%s", name, util.RandomString(5)) - mp := &v1beta1.MachinePool{ + mp := &clusterv1.MachinePool{ TypeMeta: metav1.TypeMeta{ Kind: "MachinePool", - APIVersion: v1beta1.GroupVersion.String(), + APIVersion: clusterv1.GroupVersion.String(), }, ObjectMeta: metav1.ObjectMeta{ Namespace: "default", Name: generatedName, }, - Spec: v1beta1.MachinePoolSpec{ + Spec: clusterv1.MachinePoolSpec{ Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ Bootstrap: clusterv1.Bootstrap{ - ConfigRef: &corev1.ObjectReference{ - Kind: "EKSConfig", - APIVersion: eksbootstrapv1.GroupVersion.String(), + ConfigRef: clusterv1.ContractVersionedObjectReference{ + Kind: "EKSConfig", + APIGroup: eksbootstrapv1.GroupVersion.Group, }, }, }, @@ -396,7 +397,7 @@ func newEKSConfig(machine *clusterv1.Machine) *eksbootstrapv1.EKSConfig { } config.Status.DataSecretName = &machine.Name machine.Spec.Bootstrap.ConfigRef.Name = config.Name - machine.Spec.Bootstrap.ConfigRef.Namespace = config.Namespace + machine.Namespace = config.Namespace } return config } diff --git a/bootstrap/eks/controllers/eksconfig_controller_test.go b/bootstrap/eks/controllers/eksconfig_controller_test.go index 6f4a53c513..c647e7ea8a 100644 --- a/bootstrap/eks/controllers/eksconfig_controller_test.go +++ b/bootstrap/eks/controllers/eksconfig_controller_test.go @@ -22,8 +22,9 @@ import ( . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/utils/ptr" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" bsutil "sigs.k8s.io/cluster-api/bootstrap/util" ) @@ -35,7 +36,9 @@ func TestEKSConfigReconcilerReturnEarlyIfClusterInfraNotReady(t *testing.T) { config := newEKSConfig(machine) cluster.Status = clusterv1.ClusterStatus{ - InfrastructureReady: false, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(false), + }, } reconciler := EKSConfigReconciler{ @@ -56,7 +59,9 @@ func TestEKSConfigReconcilerReturnEarlyIfClusterControlPlaneNotInitialized(t *te config := newEKSConfig(machine) cluster.Status = clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, } reconciler := EKSConfigReconciler{ diff --git a/cmd/clusterawsadm/gc/gc.go b/cmd/clusterawsadm/gc/gc.go index ab44eee895..13b1c257a1 100644 --- a/cmd/clusterawsadm/gc/gc.go +++ b/cmd/clusterawsadm/gc/gc.go @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" "sigs.k8s.io/cluster-api/util/patch" ) @@ -187,9 +187,9 @@ func (c *CmdProcessor) getInfraCluster(ctx context.Context) (*unstructured.Unstr } ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, c.client, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, c.client, ref, c.namespace) if err != nil { - return nil, fmt.Errorf("getting infra cluster %s/%s: %w", ref.Namespace, ref.Name, err) + return nil, fmt.Errorf("getting infra cluster %s/%s: %w",
c.namespace, ref.Name, err) } return obj, nil diff --git a/cmd/clusterawsadm/gc/gc_test.go b/cmd/clusterawsadm/gc/gc_test.go index 4720029d64..0c449c622b 100644 --- a/cmd/clusterawsadm/gc/gc_test.go +++ b/cmd/clusterawsadm/gc/gc_test.go @@ -22,7 +22,6 @@ import ( "testing" . "github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -31,7 +30,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/external" ) @@ -105,7 +104,7 @@ func TestEnableGC(t *testing.T) { cluster := tc.existingObjs[0].(*clusterv1.Cluster) ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, fake, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, fake, ref, cluster.Namespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(obj).NotTo(BeNil()) @@ -176,7 +175,7 @@ func TestDisableGC(t *testing.T) { cluster := tc.existingObjs[0].(*clusterv1.Cluster) ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, fake, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, fake, ref, cluster.Namespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(obj).NotTo(BeNil()) @@ -271,7 +270,7 @@ func TestConfigureGC(t *testing.T) { cluster := tc.existingObjs[0].(*clusterv1.Cluster) ref := cluster.Spec.InfrastructureRef - obj, err := external.Get(ctx, fake, ref) + obj, err := external.GetObjectFromContractVersionedRef(ctx, fake, ref, cluster.Namespace) g.Expect(err).NotTo(HaveOccurred()) g.Expect(obj).NotTo(BeNil()) @@ -304,11 +303,10 @@ func newManagedCluster(name string, excludeInfra bool) []client.Object { Namespace: "default", }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: name, - Namespace: "default", - Kind: "AWSManagedControlPlane", - APIVersion: ekscontrolplanev1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: name, + Kind: "AWSManagedControlPlane", + APIGroup: ekscontrolplanev1.GroupVersion.Group, }, }, }, @@ -351,11 +349,10 @@ func newUnManagedCluster(name string, excludeInfra bool) []client.Object { Namespace: "default", }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: name, - Namespace: "default", - Kind: "AWSCluster", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: name, + Kind: "AWSCluster", + APIGroup: infrav1.GroupVersion.Group, }, }, }, diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml index df89cffa49..012b19acf3 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanes.yaml @@ -154,18 +154,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. 
maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object disableVPCCNI: default: false @@ -2387,18 +2388,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object eksClusterName: description: |- @@ -3893,7 +3895,7 @@ spec: failureDomains: additionalProperties: description: |- - FailureDomainSpec is the Schema for Cluster API failure domains. + FailureDomain is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: @@ -3906,6 +3908,13 @@ spec: description: controlPlane determines if this failure domain is suitable for use by control plane machines. type: boolean + name: + description: name is the name of the failure domain. + maxLength: 256 + minLength: 1 + type: string + required: + - name type: object description: FailureDomains specifies a list fo available availability zones that can be used diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml index 450fd296b0..201c29bcab 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_awsmanagedcontrolplanetemplates.yaml @@ -172,20 +172,21 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object eksClusterName: description: |- diff --git a/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml b/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml index 1f49d9548e..153e44f145 100644 --- a/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml +++ b/config/crd/bases/controlplane.cluster.x-k8s.io_rosacontrolplanes.yaml @@ -183,18 +183,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. 
format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object credentialsSecretRef: description: |- diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml index 83416aa9ae..8c4273920c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclusters.yaml @@ -109,18 +109,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object controlPlaneLoadBalancer: description: ControlPlaneLoadBalancer is optional configuration for @@ -975,18 +976,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object controlPlaneLoadBalancer: description: ControlPlaneLoadBalancer is optional configuration for @@ -2588,7 +2590,7 @@ spec: failureDomains: additionalProperties: description: |- - FailureDomainSpec is the Schema for Cluster API failure domains. + FailureDomain is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: @@ -2601,8 +2603,14 @@ spec: description: controlPlane determines if this failure domain is suitable for use by control plane machines. type: boolean + name: + description: name is the name of the failure domain. + maxLength: 256 + minLength: 1 + type: string + required: + - name type: object - description: FailureDomains is a slice of FailureDomains. type: object networkStatus: description: NetworkStatus encapsulates AWS networking resources. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml index e4a0a6cf58..ee4e290e3c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsclustertemplates.yaml @@ -57,6 +57,7 @@ spec: description: |- Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + minProperties: 1 properties: annotations: additionalProperties: @@ -124,20 +125,21 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. 
format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object controlPlaneLoadBalancer: description: ControlPlaneLoadBalancer is optional configuration @@ -486,6 +488,7 @@ spec: description: |- Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + minProperties: 1 properties: annotations: additionalProperties: @@ -553,20 +556,21 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object controlPlaneLoadBalancer: description: ControlPlaneLoadBalancer is optional configuration diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml index 5e3f55519d..1cb41ae6f6 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmachinetemplates.yaml @@ -52,6 +52,7 @@ spec: description: |- Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + minProperties: 1 properties: annotations: additionalProperties: @@ -466,6 +467,7 @@ spec: description: |- Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + minProperties: 1 properties: annotations: additionalProperties: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml index ad7df80fa0..c076d0b932 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclusters.yaml @@ -60,18 +60,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object type: object status: @@ -132,7 +133,7 @@ spec: failureDomains: additionalProperties: description: |- - FailureDomainSpec is the Schema for Cluster API failure domains. + FailureDomain is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: @@ -145,6 +146,13 @@ spec: description: controlPlane determines if this failure domain is suitable for use by control plane machines. type: boolean + name: + description: name is the name of the failure domain. 
+ maxLength: 256 + minLength: 1 + type: string + required: + - name type: object description: FailureDomains specifies a list fo available availability zones that can be used diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclustertemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclustertemplates.yaml index 8b440da8a0..adee7a883c 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclustertemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_awsmanagedclustertemplates.yaml @@ -56,20 +56,21 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object type: object required: diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml index d3e8b80715..778527d15d 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_rosaclusters.yaml @@ -60,18 +60,19 @@ spec: controlPlaneEndpoint: description: ControlPlaneEndpoint represents the endpoint used to communicate with the control plane. + minProperties: 1 properties: host: description: host is the hostname on which the API server is serving. maxLength: 512 + minLength: 1 type: string port: description: port is the port on which the API server is serving. format: int32 + maximum: 65535 + minimum: 1 type: integer - required: - - host - - port type: object type: object status: @@ -132,7 +133,7 @@ spec: failureDomains: additionalProperties: description: |- - FailureDomainSpec is the Schema for Cluster API failure domains. + FailureDomain is the Schema for Cluster API failure domains. It allows controllers to understand how many failure domains a cluster can optionally span across. properties: attributes: @@ -145,6 +146,13 @@ spec: description: controlPlane determines if this failure domain is suitable for use by control plane machines. type: boolean + name: + description: name is the name of the failure domain. 
+ maxLength: 256 + minLength: 1 + type: string + required: + - name type: object description: FailureDomains specifies a list fo available availability zones that can be used diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go index 23cc9e45ba..cf1bdec3ee 100644 --- a/controllers/awscluster_controller.go +++ b/controllers/awscluster_controller.go @@ -27,6 +27,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -35,6 +36,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" @@ -49,10 +51,11 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" capiannotations "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -291,17 +294,17 @@ func (r *AWSClusterReconciler) reconcileLoadBalancer(ctx context.Context, cluste if err := elbService.ReconcileLoadbalancers(ctx); err != nil { clusterScope.Error(err, "failed to reconcile load balancer") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) return nil, err } if awsCluster.Status.Network.APIServerELB.DNSName == "" { - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForDNSNameReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForDNSNameReason, clusterv1beta1.ConditionSeverityInfo, "") clusterScope.Info("Waiting on API server ELB DNS name") return &retryAfterDuration, nil } - conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + conditions.MarkTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition) awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{ Host: awsCluster.Status.Network.APIServerELB.DNSName, @@ -336,12 +339,12 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope if err := sgService.ReconcileSecurityGroups(); err != nil { clusterScope.Error(err, "failed to reconcile security groups") - conditions.MarkFalse(awsCluster, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(awsCluster, infrav1beta1.ClusterSecurityGroupsReadyCondition, 
infrav1beta1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) return reconcile.Result{}, err } if err := ec2Service.ReconcileBastion(); err != nil { - conditions.MarkFalse(awsCluster, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(awsCluster, infrav1beta1.BastionHostReadyCondition, infrav1beta1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error()) clusterScope.Error(err, "failed to reconcile bastion host") return reconcile.Result{}, err } @@ -361,10 +364,10 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope } if err := s3Service.ReconcileBucket(ctx); err != nil { - conditions.MarkFalse(awsCluster, infrav1.S3BucketReadyCondition, infrav1.S3BucketFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(awsCluster, infrav1beta1.S3BucketReadyCondition, infrav1beta1.S3BucketFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile S3 Bucket for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name) } - conditions.MarkTrue(awsCluster, infrav1.S3BucketReadyCondition) + conditions.MarkTrue(awsCluster, infrav1beta1.S3BucketReadyCondition) for _, subnet := range clusterScope.Subnets().FilterPrivate() { found := false @@ -375,8 +378,8 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope } } - clusterScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomainSpec{ - ControlPlane: found, + clusterScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomain{ + ControlPlane: ptr.To(found), }) } @@ -419,18 +422,18 @@ func (r *AWSClusterReconciler) requeueAWSClusterForUnpausedCluster(_ context.Con } // Make sure the ref is set - if c.Spec.InfrastructureRef == nil { + if !c.Spec.InfrastructureRef.IsDefined() { log.Trace("Cluster does not have an InfrastructureRef, skipping mapping.") return nil } - if c.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSCluster" { + if c.Spec.InfrastructureRef.Kind != "AWSCluster" { log.Trace("Cluster has an InfrastructureRef for a different type, skipping mapping.") return nil } awsCluster := &infrav1.AWSCluster{} - key := types.NamespacedName{Namespace: c.Spec.InfrastructureRef.Namespace, Name: c.Spec.InfrastructureRef.Name} + key := types.NamespacedName{Namespace: c.Namespace, Name: c.Spec.InfrastructureRef.Name} if err := r.Get(ctx, key, awsCluster); err != nil { log.Error(err, "Failed to get AWS cluster") @@ -457,21 +460,21 @@ func (r *AWSClusterReconciler) checkForExternalControlPlaneLoadBalancer(clusterS switch { case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0 && awsCluster.Spec.ControlPlaneEndpoint.Port == 0: clusterScope.Info("AWSCluster control plane endpoint is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "") return &requeueAfterPeriod case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0: clusterScope.Info("AWSCluster control plane endpoint host is still non-populated") -
conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "") return &requeueAfterPeriod case awsCluster.Spec.ControlPlaneEndpoint.Port == 0: clusterScope.Info("AWSCluster control plane endpoint port is still non-populated") - conditions.MarkFalse(awsCluster, infrav1.LoadBalancerReadyCondition, infrav1.WaitForExternalControlPlaneEndpointReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "") return &requeueAfterPeriod default: - conditions.MarkTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + conditions.MarkTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition) return nil } diff --git a/controllers/awscluster_controller_test.go b/controllers/awscluster_controller_test.go index bc5804a679..25a1cb7b22 100644 --- a/controllers/awscluster_controller_test.go +++ b/controllers/awscluster_controller_test.go @@ -35,6 +35,7 @@ import ( "k8s.io/client-go/util/retry" "sigs.k8s.io/controller-runtime/pkg/client" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" @@ -43,7 +44,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/network" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" ) @@ -156,7 +157,7 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(cluster.Spec.ControlPlaneEndpoint.Host).To(BeEmpty()) g.Expect(cluster.Spec.ControlPlaneEndpoint.Port).To(BeZero()) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionFalse, severity: clusterv1.ConditionSeverityInfo, reason: infrav1.WaitForExternalControlPlaneEndpointReason}, + {conditionType: infrav1beta1.LoadBalancerReadyCondition, status: corev1.ConditionFalse, severity: clusterv1beta1.ConditionSeverityInfo, reason: infrav1beta1.WaitForExternalControlPlaneEndpointReason}, }) // Mimicking an external operator patching the cluster with an already provisioned Load Balancer: // this could be done by a human who provisioned a LB, or by a Control Plane provider. 
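(A note on the dual imports above: object types move to the v1beta2 API, but condition handling on the provider CRDs still follows the v1beta1 contract, so the patch consistently pairs clusterv1 = core/v1beta2 for types with clusterv1beta1 = core/v1beta1 plus the deprecated condition helpers. A condensed sketch of the convention, assuming an object that still carries v1beta1-style conditions:

    import (
        clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
        "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
    )

    // v1beta2 metav1.Conditions carry no severity, so any MarkFalse call that
    // still takes a severity has to keep referencing the v1beta1 constants.
    conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition,
        infrav1beta1.WaitForDNSNameReason, clusterv1beta1.ConditionSeverityInfo, ""))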
@@ -176,11 +177,11 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(cs.VPC().ID).To(Equal("vpc-exists")) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.LoadBalancerReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.LoadBalancerReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, }) }) t.Run("Should successfully reconcile AWSCluster creation with unmanaged VPC", func(t *testing.T) { @@ -275,10 +276,10 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(cs.VPC().ID).To(Equal("vpc-exists")) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, }) }) t.Run("Should successfully reconcile AWSCluster creation with unmanaged VPC and a network type load balancer", func(t *testing.T) { @@ -382,10 +383,10 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(cs.VPC().ID).To(Equal("vpc-exists")) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.BastionHostReadyCondition, 
status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, }) }) t.Run("Should successfully reconcile AWSCluster creation with managed VPC", func(t *testing.T) { @@ -475,10 +476,10 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(cs.VPC().ID).To(Equal("vpc-new")) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {conditionType: infrav1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, - {conditionType: infrav1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.ClusterSecurityGroupsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.BastionHostReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.VpcReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, + {conditionType: infrav1beta1.SubnetsReadyCondition, status: corev1.ConditionTrue, severity: "", reason: ""}, }) // Information should get written back into the `ClusterScope` object. Keeping it up to date means that @@ -651,15 +652,15 @@ func TestAWSClusterReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileDelete(ctx, cs) g.Expect(err).To(BeNil()) expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{ - {infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.SecondaryCidrsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletingReason}, - {infrav1.RouteTablesReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.VpcEndpointsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.NatGatewaysReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.InternetGatewayReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.SubnetsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.VpcReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + {infrav1beta1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1beta1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1beta1.SecondaryCidrsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletingReason}, + {infrav1beta1.RouteTablesReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1beta1.VpcEndpointsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, 
clusterv1beta1.DeletedReason}, + {infrav1beta1.NatGatewaysReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1beta1.InternetGatewayReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1beta1.SubnetsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1beta1.VpcReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, }) }) } diff --git a/controllers/awscluster_controller_unit_test.go b/controllers/awscluster_controller_unit_test.go index c28544c677..425cb1fb7f 100644 --- a/controllers/awscluster_controller_unit_test.go +++ b/controllers/awscluster_controller_unit_test.go @@ -34,11 +34,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" ) @@ -244,7 +246,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { }) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).To(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionTrue, "", ""}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1beta1.LoadBalancerReadyCondition, corev1.ConditionTrue, "", ""}}) g.Expect(awsCluster.GetFinalizers()).To(ContainElement(infrav1.ClusterFinalizer)) }) @@ -320,7 +322,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).ToNot(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.ClusterSecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.ClusterSecurityGroupReconciliationFailedReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1beta1.ClusterSecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.ClusterSecurityGroupReconciliationFailedReason}}) }) t.Run("Should fail AWSCluster create with BastionHostReadyCondition status false", func(t *testing.T) { g := NewWithT(t) @@ -343,7 +345,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).ToNot(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.BastionHostFailedReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1beta1.BastionHostReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.BastionHostFailedReason}}) }) t.Run("Should fail AWSCluster create with failure in LoadBalancer reconciliation", func(t *testing.T) { g := NewWithT(t) @@ -367,7 +369,7 @@ func 
TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).ToNot(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.LoadBalancerFailedReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1beta1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.LoadBalancerFailedReason}}) }) t.Run("Should fail AWSCluster create with LoadBalancer reconcile failure with WaitForDNSName condition as false", func(t *testing.T) { g := NewWithT(t) @@ -391,7 +393,7 @@ func TestAWSClusterReconcileOperations(t *testing.T) { g.Expect(err).To(BeNil()) _, err = reconciler.reconcileNormal(context.TODO(), cs) g.Expect(err).To(BeNil()) - expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitForDNSNameReason}}) + expectAWSClusterConditions(g, cs.AWSCluster, []conditionAssertion{{infrav1beta1.LoadBalancerReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, infrav1beta1.WaitForDNSNameReason}}) }) }) }) diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go index 079bbf9164..c9f5546390 100644 --- a/controllers/awsmachine_controller.go +++ b/controllers/awsmachine_controller.go @@ -45,6 +45,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/source" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/feature" @@ -60,9 +61,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -294,7 +296,7 @@ func (r *AWSMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Ma return controller.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(requeueAWSMachinesForUnpausedCluster), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log.GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log.GetLogger())), ) } @@ -335,13 +337,13 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope // all the other errors are blocking. // Because we are reconciling all load balancers, attempt to treat the error as a list of errors. 
if err = kerrors.FilterOut(err, elb.IsAccessDenied, elb.IsNotFound); err != nil { - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err) } } if machineScope.IsControlPlane() { - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") } if feature.Gates.Enabled(feature.EventBridgeInstanceState) { @@ -366,7 +368,7 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope machineScope.Info("Terminating EC2 instance", "instance-id", instance.ID) // Set the InstanceReadyCondition and patch the object before the blocking operation - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := machineScope.PatchObject(); err != nil { machineScope.Error(err, "failed to patch object") return ctrl.Result{}, err @@ -374,11 +376,11 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope if err := ec2Service.TerminateInstance(instance.ID); err != nil { machineScope.Error(err, "failed to terminate instance") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedTerminate", "Failed to terminate instance %q: %v", instance.ID, err) return ctrl.Result{}, err } - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // If the AWSMachine specifies NetworkStatus Interfaces, detach the cluster's core Security Groups from them as part of deletion. 
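(The delete path above preserves a deliberate ordering that survives interrupted reconciles: mark the condition, patch the object, then start the blocking cloud call. A compressed sketch of that sequence with the same scope types, as a reminder of why the MarkFalse/PatchObject pair precedes TerminateInstance:

    // Record intent first and flush it to the API server, so a crash between
    // the patch and the terminate call still leaves an accurate condition.
    conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition,
        clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
    if err := machineScope.PatchObject(); err != nil {
        return ctrl.Result{}, err
    }
    if err := ec2Service.TerminateInstance(instance.ID); err != nil {
        return ctrl.Result{}, err
    })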
if len(machineScope.AWSMachine.Spec.NetworkInterfaces) > 0 { @@ -394,7 +396,7 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope "instanceID", instance.ID, ) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := machineScope.PatchObject(); err != nil { return ctrl.Result{}, err } @@ -402,11 +404,11 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope for _, id := range machineScope.AWSMachine.Spec.NetworkInterfaces { if err := ec2Service.DetachSecurityGroupsFromNetworkInterface(core, id); err != nil { machineScope.Error(err, "failed to detach security groups from instance's network interfaces") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, err } } - conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") } // Release an Elastic IP when the machine has public IP Address (EIP) with a cluster-wide config @@ -476,16 +478,16 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope return ctrl.Result{}, nil } - if !machineScope.Cluster.Status.InfrastructureReady { + if !ptr.Deref(machineScope.Cluster.Status.Initialization.InfrastructureProvisioned, false) { machineScope.Info("Cluster infrastructure is not ready yet") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } // Make sure bootstrap data is available and populated.
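(On the Initialization fields above: v1beta2 models them as *bool, so a bare dereference panics on a Cluster whose status has not been populated yet. The guarded form, a sketch using the k8s.io/utils/ptr package this file already imports:

    // ptr.Deref returns the pointed-to value, or the supplied default when nil,
    // so an unset InfrastructureProvisioned is treated as "not ready".
    provisioned := ptr.Deref(machineScope.Cluster.Status.Initialization.InfrastructureProvisioned, false)
    if !provisioned {
        machineScope.Info("Cluster infrastructure is not ready yet")
        return ctrl.Result{}, nil
    })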
if !machineScope.IsMachinePoolMachine() && machineScope.Machine.Spec.Bootstrap.DataSecretName == nil { machineScope.Info("Bootstrap data secret reference is not yet available") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -495,13 +497,13 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope instance, err := r.findInstance(machineScope, ec2svc) if err != nil { machineScope.Error(err, "unable to find instance") - conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, "%s", err.Error()) + conditions.MarkUnknown(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceNotFoundReason, "%s", err.Error()) return ctrl.Result{}, err } if instance == nil && machineScope.IsMachinePoolMachine() { err = errors.New("no instance found for machine pool") machineScope.Error(err, "unable to find instance") - conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceNotFoundReason, "%s", err.Error()) + conditions.MarkUnknown(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceNotFoundReason, "%s", err.Error()) return ctrl.Result{}, err } @@ -517,8 +519,8 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope // Create new instance since providerId is nil and instance could not be found by tags. if instance == nil { // Avoid a flickering condition between InstanceProvisionStarted and InstanceProvisionFailed if there's a persistent failure with createInstance - if conditions.GetReason(machineScope.AWSMachine, infrav1.InstanceReadyCondition) != infrav1.InstanceProvisionFailedReason { - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionStartedReason, clusterv1.ConditionSeverityInfo, "") + if conditions.GetReason(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition) != infrav1beta1.InstanceProvisionFailedReason { + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceProvisionStartedReason, clusterv1beta1.ConditionSeverityInfo, "") if patchErr := machineScope.PatchObject(); patchErr != nil { machineScope.Error(patchErr, "failed to patch conditions") return ctrl.Result{}, patchErr @@ -534,7 +536,7 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope instance, err = r.createInstance(ctx, ec2svc, machineScope, clusterScope, objectStoreSvc) if err != nil { machineScope.Error(err, "unable to create instance") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceProvisionFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return ctrl.Result{}, err } } @@ -584,13 +586,13 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope case infrav1.InstanceStatePending: machineScope.SetNotReady() shouldRequeue = true - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, 
infrav1.InstanceNotReadyReason, clusterv1.ConditionSeverityWarning, "") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceNotReadyReason, clusterv1beta1.ConditionSeverityWarning, "") case infrav1.InstanceStateStopping, infrav1.InstanceStateStopped: machineScope.SetNotReady() - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceStoppedReason, clusterv1.ConditionSeverityError, "") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceStoppedReason, clusterv1beta1.ConditionSeverityError, "") case infrav1.InstanceStateRunning: machineScope.SetReady() - conditions.MarkTrue(machineScope.AWSMachine, infrav1.InstanceReadyCondition) + conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition) case infrav1.InstanceStateShuttingDown, infrav1.InstanceStateTerminated: machineScope.SetNotReady() @@ -598,12 +600,12 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope // In an auto-scaling group, instance termination is perfectly normal on scale-down // and therefore should not be reported as error. machineScope.Info("EC2 instance of machine pool was terminated", "state", instance.State, "instance-id", *machineScope.GetInstanceID()) - r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, infrav1.InstanceTerminatedReason, "EC2 instance termination") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceTerminatedReason, clusterv1.ConditionSeverityInfo, "") + r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, infrav1beta1.InstanceTerminatedReason, "EC2 instance termination") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceTerminatedReason, clusterv1beta1.ConditionSeverityInfo, "") } else { machineScope.Info("Unexpected EC2 instance termination", "state", instance.State, "instance-id", *machineScope.GetInstanceID()) r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InstanceUnexpectedTermination", "Unexpected EC2 instance termination") - conditions.MarkFalse(machineScope.AWSMachine, infrav1.InstanceReadyCondition, infrav1.InstanceTerminatedReason, clusterv1.ConditionSeverityError, "") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceTerminatedReason, clusterv1beta1.ConditionSeverityError, "") } default: machineScope.SetNotReady() @@ -611,7 +613,7 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InstanceUnhandledState", "EC2 instance state is undefined") machineScope.SetFailureReason("UpdateError") machineScope.SetFailureMessage(errors.Errorf("EC2 instance state %q is undefined", instance.State)) - conditions.MarkUnknown(machineScope.AWSMachine, infrav1.InstanceReadyCondition, "", "") + conditions.MarkUnknown(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, "", "") } // reconcile the deletion of the bootstrap data secret now that we have updated instance state @@ -681,11 +683,11 @@ func (r *AWSMachineReconciler) reconcileOperationalState(ec2svc services.EC2Inte // Ensure that the security groups are correct. 
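(Related to the guards below: alongside the reference structs becoming value types, v1beta2 gives them an IsDefined() helper, so the old nil comparisons such as NodeRef == nil become method calls. A minimal sketch, with the reference's field set treated as illustrative:

    // A zero-value reference reports IsDefined() == false; there is no pointer
    // left to compare against nil.
    if !machineScope.Machine.Status.NodeRef.IsDefined() {
        // No node has joined yet, so keep the bootstrap secret available.
        return nil
    })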
_, err = r.ensureSecurityGroups(ec2svc, machineScope, machineScope.AWSMachine.Spec.AdditionalSecurityGroups, existingSecurityGroups) if err != nil { - conditions.MarkFalse(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition, infrav1.SecurityGroupsFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, infrav1beta1.SecurityGroupsFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) machineScope.Error(err, "unable to ensure security groups") return err } - conditions.MarkTrue(machineScope.AWSMachine, infrav1.SecurityGroupsReadyCondition) + conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition) err = r.ensureInstanceMetadataOptions(ec2svc, instance, machineScope.AWSMachine) if err != nil { @@ -712,7 +714,7 @@ func (r *AWSMachineReconciler) deleteEncryptedBootstrapDataSecret(machineScope * } // Do nothing if the AWSMachine is not in a failed state, and is operational from an EC2 perspective, but does not have a node reference - if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && machineScope.Machine.Status.NodeRef == nil && !machineScope.AWSMachineIsDeleted() { + if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && !machineScope.Machine.Status.NodeRef.IsDefined() && !machineScope.AWSMachineIsDeleted() { return nil } machineScope.Info("Deleting unneeded entry from AWS Secret", "secretPrefix", machineScope.GetSecretPrefix()) @@ -923,7 +925,7 @@ func (r *AWSMachineReconciler) deleteBootstrapData(ctx context.Context, machineS func (r *AWSMachineReconciler) deleteIgnitionBootstrapDataFromS3(ctx context.Context, machineScope *scope.MachineScope, objectStoreSvc services.ObjectStoreInterface) error { // Do nothing if the AWSMachine is not in a failed state, and is operational from an EC2 perspective, but does not have a node reference - if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && machineScope.Machine.Status.NodeRef == nil && !machineScope.AWSMachineIsDeleted() { + if !machineScope.HasFailed() && machineScope.InstanceIsOperational() && !machineScope.Machine.Status.NodeRef.IsDefined() && !machineScope.AWSMachineIsDeleted() { return nil } @@ -1014,12 +1016,12 @@ func (r *AWSMachineReconciler) registerInstanceToClassicLB(ctx context.Context, if err := elbsvc.RegisterInstanceWithAPIServerELB(ctx, i); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with classic load balancer: %v", i.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not register control plane instance %q with classic load balancer", i.ID) } r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB", "Control plane instance %q is registered with classic load balancer", i.ID) - conditions.MarkTrue(machineScope.AWSMachine, infrav1.ELBAttachedCondition) + conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition) return nil } @@ -1039,19 +1041,19 @@ func (r *AWSMachineReconciler) registerInstanceToV2LB(ctx context.Context, 
machi if ptr.Deref(machineScope.GetInstanceState(), infrav1.InstanceStatePending) != infrav1.InstanceStateRunning { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Cannot register control plane instance %q with load balancer: instance is not running", instance.ID) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityInfo, "instance not running") + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityInfo, "instance not running") return elb.NewInstanceNotRunning("instance is not running") } if err := elbsvc.RegisterInstanceWithAPIServerLB(ctx, instance, lb); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with load balancer: %v", instance.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBAttachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not register control plane instance %q with load balancer", instance.ID) } r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB", "Control plane instance %q is registered with load balancer", instance.ID) - conditions.MarkTrue(machineScope.AWSMachine, infrav1.ELBAttachedCondition) + conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition) return nil } @@ -1070,7 +1072,7 @@ func (r *AWSMachineReconciler) deregisterInstanceFromClassicLB(ctx context.Conte if err := elbsvc.DeregisterInstanceFromAPIServerELB(ctx, instance); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB", "Failed to deregister control plane instance %q from load balancer: %v", instance.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBDetachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", instance.ID) } @@ -1095,7 +1097,7 @@ func (r *AWSMachineReconciler) deregisterInstanceFromV2LB(ctx context.Context, m if err := elbsvc.DeregisterInstanceFromAPIServerLB(ctx, targetGroupArn, i); err != nil { r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB", "Failed to deregister control plane instance %q from load balancer: %v", i.ID, err) - conditions.MarkFalse(machineScope.AWSMachine, infrav1.ELBAttachedCondition, infrav1.ELBDetachFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBDetachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", i.ID) } } @@ -1166,7 +1168,7 @@ func (r *AWSMachineReconciler) requestsForCluster(log logger.Wrapper, namespace, result := 
make([]ctrl.Request, 0, len(machineList.Items)) for _, m := range machineList.Items { log.WithValues("machine", klog.KObj(&m)) - if m.Spec.InfrastructureRef.GroupVersionKind().Kind != "AWSMachine" { + if m.Spec.InfrastructureRef.Kind != "AWSMachine" { log.Trace("Machine has an InfrastructureRef for a different type, will not add to reconciliation request.") continue } @@ -1174,7 +1176,7 @@ func (r *AWSMachineReconciler) requestsForCluster(log logger.Wrapper, namespace, log.Trace("Machine has an InfrastructureRef with an empty name, will not add to reconciliation request.") continue } - log.WithValues("awsMachine", klog.KRef(m.Spec.InfrastructureRef.Namespace, m.Spec.InfrastructureRef.Name)) + log.WithValues("awsMachine", klog.KRef(m.Namespace, m.Spec.InfrastructureRef.Name)) log.Trace("Adding AWSMachine to reconciliation request.") result = append(result, ctrl.Request{NamespacedName: client.ObjectKey{Namespace: m.Namespace, Name: m.Spec.InfrastructureRef.Name}}) } @@ -1186,7 +1188,7 @@ func (r *AWSMachineReconciler) getInfraCluster(ctx context.Context, log *logger. var managedControlPlaneScope *scope.ManagedControlPlaneScope var err error - if cluster.Spec.ControlPlaneRef != nil && cluster.Spec.ControlPlaneRef.Kind == "AWSManagedControlPlane" { + if cluster.Spec.ControlPlaneRef.IsDefined() && cluster.Spec.ControlPlaneRef.Kind == "AWSManagedControlPlane" { controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} controlPlaneName := client.ObjectKey{ Namespace: awsMachine.Namespace, diff --git a/controllers/awsmachine_controller_test.go b/controllers/awsmachine_controller_test.go index 63abd127ef..b5e3129ac3 100644 --- a/controllers/awsmachine_controller_test.go +++ b/controllers/awsmachine_controller_test.go @@ -35,6 +35,7 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" @@ -43,9 +44,10 @@ import ( elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { @@ -140,7 +142,7 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) ms.Machine.Spec.Bootstrap.DataSecretName = aws.String("bootstrap-data") - ms.Machine.Spec.Version = aws.String("test") + ms.Machine.Spec.Version = "test" ms.AWSMachine.Spec.Subnet = &infrav1.AWSResourceReference{ID: aws.String("subnet-1")} ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateRunning ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""} @@ -167,9 +169,9 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileNormal(ctx, ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) expectConditions(g, ms.AWSMachine, []conditionAssertion{ - {infrav1.SecurityGroupsReadyCondition, corev1.ConditionTrue, "", ""}, - {infrav1.InstanceReadyCondition, corev1.ConditionTrue, "", ""}, - 
{infrav1.ELBAttachedCondition, corev1.ConditionTrue, "", ""}, + {infrav1beta1.SecurityGroupsReadyCondition, corev1.ConditionTrue, "", ""}, + {infrav1beta1.InstanceReadyCondition, corev1.ConditionTrue, "", ""}, + {infrav1beta1.ELBAttachedCondition, corev1.ConditionTrue, "", ""}, }) g.Expect(ms.AWSMachine.Finalizers).Should(ContainElement(infrav1.MachineFinalizer)) }) @@ -241,8 +243,8 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) expectConditions(g, ms.AWSMachine, []conditionAssertion{ - {infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, - {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + {infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, + {infrav1beta1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, }) g.Expect(ms.AWSMachine.Finalizers).ShouldNot(ContainElement(infrav1.MachineFinalizer)) }) @@ -320,7 +322,7 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { g.Expect(err).To(BeNil()) ms.Machine.Spec.Bootstrap.DataSecretName = aws.String("bootstrap-data") - ms.Machine.Spec.Version = aws.String("test") + ms.Machine.Spec.Version = "test" ms.AWSMachine.Spec.Subnet = &infrav1.AWSResourceReference{ID: aws.String("subnet-1")} ms.AWSMachine.Status.InstanceState = &infrav1.InstanceStateRunning ms.Machine.Labels = map[string]string{clusterv1.MachineControlPlaneLabel: ""} @@ -346,7 +348,7 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileNormal(ctx, ms, cs, cs, cs, cs) g.Expect(err).Should(HaveOccurred()) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionTrue, "", ""}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionTrue, "", ""}}) g.Expect(ms.AWSMachine.Finalizers).Should(ContainElement(infrav1.MachineFinalizer)) }) t.Run("Should fail in reconciling control-plane machine deletion while terminating instance ", func(t *testing.T) { @@ -422,8 +424,8 @@ func TestAWSMachineReconcilerIntegrationTests(t *testing.T) { _, err = reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) g.Expect(err).Should(HaveOccurred()) expectConditions(g, ms.AWSMachine, []conditionAssertion{ - {infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}, - {infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}, + {infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, "DeletingFailed"}, + {infrav1beta1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}, }) g.Expect(ms.AWSMachine.Finalizers).ShouldNot(ContainElement(infrav1.MachineFinalizer)) }) @@ -438,7 +440,9 @@ func getMachineScope(cs *scope.ClusterScope, awsMachine *infrav1.AWSMachine) (*s Name: "test", }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: &clusterv1.Machine{ @@ -528,9 +532,9 @@ func (p *pointsTo) String() string { } type conditionAssertion struct { 
- conditionType clusterv1.ConditionType + conditionType clusterv1beta1.ConditionType status corev1.ConditionStatus - severity clusterv1.ConditionSeverity + severity clusterv1beta1.ConditionSeverity reason string } diff --git a/controllers/awsmachine_controller_unit_test.go b/controllers/awsmachine_controller_unit_test.go index de1d3a8676..b2cb63bd91 100644 --- a/controllers/awsmachine_controller_unit_test.go +++ b/controllers/awsmachine_controller_unit_test.go @@ -43,6 +43,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" "sigs.k8s.io/controller-runtime/pkg/reconcile" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" @@ -52,8 +53,9 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/mock_services" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" kubeadmv1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" ) @@ -115,7 +117,9 @@ func TestAWSMachineReconciler(t *testing.T) { Name: "test", }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: &clusterv1.Machine{ @@ -155,7 +159,9 @@ func TestAWSMachineReconciler(t *testing.T) { Client: client, Cluster: &clusterv1.Cluster{ Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, Machine: &clusterv1.Machine{ @@ -232,7 +238,7 @@ func TestAWSMachineReconciler(t *testing.T) { setup(t, g, awsMachine) defer teardown(t, g) runningInstance(t, g) - ms.Cluster.Status.InfrastructureReady = false + ms.Cluster.Status.Initialization.InfrastructureProvisioned = ptr.To(false) buf := new(bytes.Buffer) klog.SetOutput(buf) @@ -240,7 +246,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Cluster infrastructure is not ready yet")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, infrav1beta1.WaitingForClusterInfrastructureReason}}) }) t.Run("should exit immediately if bootstrap data secret reference isn't available", func(t *testing.T) { @@ -258,7 +264,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Bootstrap data secret reference is not yet available")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForBootstrapDataReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, 
infrav1beta1.WaitingForBootstrapDataReason}}) }) t.Run("should return an error when we can't list instances by tags", func(t *testing.T) { @@ -393,7 +399,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.InstanceNotReadyReason}}) }) t.Run("should set instance to running", func(t *testing.T) { @@ -413,7 +419,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(ms.AWSMachine.Status.Ready).To(BeTrue()) g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) expectConditions(g, ms.AWSMachine, []conditionAssertion{ - {conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionTrue}, + {conditionType: infrav1beta1.InstanceReadyCondition, status: corev1.ConditionTrue}, }) }) }) @@ -435,7 +441,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(buf.String()).To(ContainSubstring("EC2 instance state is undefined")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("InstanceUnhandledState"))) g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"NewAWSMachineState\" is undefined"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{conditionType: infrav1.InstanceReadyCondition, status: corev1.ConditionUnknown}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{conditionType: infrav1beta1.InstanceReadyCondition, status: corev1.ConditionUnknown}}) }) t.Run("security Groups succeed", func(t *testing.T) { getCoreSecurityGroups := func(t *testing.T, g *WithT) { @@ -464,7 +470,7 @@ func TestAWSMachineReconciler(t *testing.T) { ec2Svc.EXPECT().GetAdditionalSecurityGroupsIDs(gomock.Any()).Return([]string{"sg-2345"}, nil) _, _ = reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{conditionType: infrav1.SecurityGroupsReadyCondition, status: corev1.ConditionTrue}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{conditionType: infrav1beta1.SecurityGroupsReadyCondition, status: corev1.ConditionTrue}}) }) t.Run("should not tag instances if there's no tags", func(t *testing.T) { @@ -574,7 +580,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopping))) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1beta1.InstanceStoppedReason}}) }) t.Run("should then set instance to stopped and unready", func(t *testing.T) { @@ -590,7 +596,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(ms.AWSMachine.Status.InstanceState).To(PointTo(Equal(infrav1.InstanceStateStopped))) g.Expect(ms.AWSMachine.Status.Ready).To(BeFalse()) 
g.Expect(buf.String()).To(ContainSubstring("EC2 instance state changed")) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceStoppedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1beta1.InstanceStoppedReason}}) }) t.Run("should then set instance to running and ready once it is restarted", func(t *testing.T) { @@ -648,7 +654,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(buf.String()).To(ContainSubstring("Unexpected EC2 instance termination")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("UnexpectedTermination"))) g.Expect(ms.AWSMachine.Status.FailureMessage).To(PointTo(Equal("EC2 instance state \"terminated\" is unexpected"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceTerminatedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1beta1.InstanceTerminatedReason}}) }) }) t.Run("should not register if control plane ELB is already registered", func(t *testing.T) { @@ -674,7 +680,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.InstanceNotReadyReason}}) }) t.Run("should attach control plane ELB to instance", func(t *testing.T) { g := NewWithT(t) @@ -700,8 +706,8 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionTrue, "", ""}}) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.ELBAttachedCondition, corev1.ConditionTrue, "", ""}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.InstanceNotReadyReason}}) }) t.Run("should store userdata for CloudInit using AWS Secrets Manager only when not skipped", func(t *testing.T) { g := NewWithT(t) @@ -721,7 +727,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, 
[]conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.InstanceNotReadyReason}}) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) t.Run("should fail to delete bootstrap data secret if AWSMachine state is updated", func(t *testing.T) { @@ -730,9 +736,8 @@ func TestAWSMachineReconciler(t *testing.T) { setup(t, g, awsMachine) defer teardown(t, g) instanceCreate(t, g) - ms.Machine.Status.NodeRef = &corev1.ObjectReference{ - Namespace: "default", - Name: "test", + ms.Machine.Status.NodeRef = clusterv1.MachineNodeReference{ + Name: "test", } secretSvc.EXPECT().UserData(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil, nil).Times(1) @@ -740,7 +745,7 @@ func TestAWSMachineReconciler(t *testing.T) { secretSvc.EXPECT().Delete(gomock.Any()).Return(errors.New("failed to delete entries from AWS Secret")).Times(1) _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.InstanceNotReadyReason}}) g.Expect(err).To(MatchError(ContainSubstring("failed to delete entries from AWS Secret"))) }) }) @@ -771,7 +776,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err.Error()).To(ContainSubstring(expectedError)) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.InstanceProvisionFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1beta1.InstanceProvisionFailedReason}}) }) t.Run("should fail to determine the registration status of control plane ELB", func(t *testing.T) { g := NewWithT(t) @@ -797,7 +802,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err.Error()).To(ContainSubstring("error describing ELB")) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("FailedAttachControlPlaneELB"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.InstanceNotReadyReason}}) }) t.Run("should fail to attach control plane ELB to instance", func(t *testing.T) { g := NewWithT(t) @@ -823,7 +828,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err).ToNot(BeNil()) g.Expect(err.Error()).To(ContainSubstring("failed to attach ELB")) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("FailedAttachControlPlaneELB"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, 
corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.InstanceNotReadyReason}}) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) }) t.Run("should fail to delete bootstrap data secret if AWSMachine is in failed state", func(t *testing.T) { @@ -862,7 +867,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err.Error()).To(ContainSubstring("json: cannot unmarshal number into Go value of type map[string]interface {}")) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.InstanceNotReadyReason}}) }) t.Run("Should fail to update resource tags after instance is created", func(t *testing.T) { g := NewWithT(t) @@ -881,7 +886,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, infrav1.InstanceNotReadyReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.InstanceReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, infrav1beta1.InstanceNotReadyReason}}) }) }) t.Run("While ensuring SecurityGroups", func(t *testing.T) { @@ -912,7 +917,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1beta1.SecurityGroupsFailedReason}}) }) t.Run("Should fail to fetch core security groups", func(t *testing.T) { g := NewWithT(t) @@ -930,7 +935,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1beta1.SecurityGroupsFailedReason}}) }) t.Run("Should fail if ensureSecurityGroups fails to fetch additional security groups", func(t *testing.T) { g := NewWithT(t) @@ -960,7 +965,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) 
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1beta1.SecurityGroupsFailedReason}}) }) t.Run("Should fail to update security group", func(t *testing.T) { g := NewWithT(t) @@ -991,7 +996,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs, cs) g.Expect(err).ToNot(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(infrav1.MachineFinalizer)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityError, infrav1.SecurityGroupsFailedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.SecurityGroupsReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityError, infrav1beta1.SecurityGroupsFailedReason}}) }) }) }) @@ -1071,10 +1076,8 @@ func TestAWSMachineReconciler(t *testing.T) { ID: "myMachine", } - ms.Machine.Status.NodeRef = &corev1.ObjectReference{ - Kind: "Node", - Name: "myMachine", - APIVersion: "v1", + ms.Machine.Status.NodeRef = clusterv1.MachineNodeReference{ + Name: "myMachine", } ms.AWSMachine.Spec.CloudInit = infrav1.CloudInit{ @@ -1416,10 +1419,8 @@ func TestAWSMachineReconciler(t *testing.T) { ID: "myMachine", } - ms.Machine.Status.NodeRef = &corev1.ObjectReference{ - Kind: "Node", - Name: "myMachine", - APIVersion: "v1", + ms.Machine.Status.NodeRef = clusterv1.MachineNodeReference{ + Name: "myMachine", } ec2Svc.EXPECT().GetRunningInstanceByTags(gomock.Any()).Return(instance, nil).AnyTimes() @@ -1839,7 +1840,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err.Error()).To(ContainSubstring("error describing ELB")) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents)) g.Eventually(recorder.Events).Should(Receive(ContainSubstring("FailedDetachControlPlaneELB"))) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, "DeletingFailed"}}) }) t.Run("should not do anything if control plane ELB is already detached from instance", func(t *testing.T) { @@ -1862,7 +1863,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}}) }) }) }) @@ -1888,7 +1889,7 @@ func TestAWSMachineReconciler(t *testing.T) { _, err := reconciler.reconcileDelete(context.TODO(), ms, cs, cs, cs, cs) g.Expect(err).To(BeNil()) 
g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, clusterv1.DeletedReason}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, clusterv1beta1.DeletedReason}}) }) t.Run("should fail to detach control plane ELB from instance", func(t *testing.T) { g := NewWithT(t) @@ -1912,7 +1913,7 @@ func TestAWSMachineReconciler(t *testing.T) { g.Expect(err).ToNot(BeNil()) g.Expect(err.Error()).To(ContainSubstring("Duplicate access point name for load balancer")) g.Expect(ms.AWSMachine.Finalizers).To(ContainElement(metav1.FinalizerDeleteDependents)) - expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityWarning, "DeletingFailed"}}) + expectConditions(g, ms.AWSMachine, []conditionAssertion{{infrav1beta1.ELBAttachedCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityWarning, "DeletingFailed"}}) }) t.Run("should fail if secretPrefix present, but secretCount is not set", func(t *testing.T) { g := NewWithT(t) @@ -1977,10 +1978,10 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-6", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-6", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2017,10 +2018,10 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-1", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-1", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2049,10 +2050,10 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-2", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-2", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2078,10 +2079,10 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - Name: "aws-machine-3", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + Name: "aws-machine-3", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2115,10 +2116,10 @@ func TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "Machine", - Name: "aws-machine-4", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "Machine", + Name: "aws-machine-4", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2148,9 +2149,9 @@ func 
TestAWSMachineReconcilerAWSClusterToAWSMachines(t *testing.T) { }, Spec: clusterv1.MachineSpec{ ClusterName: "capi-test", - InfrastructureRef: corev1.ObjectReference{ - Kind: "AWSMachine", - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSMachine", + APIGroup: infrav1.GroupVersion.Group, }, }, }, @@ -2358,7 +2359,7 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { }, }, ownerCluster: &clusterv1.Cluster{ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{Name: "foo"}, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{Name: "foo"}, }}, expectError: false, }, @@ -2389,7 +2390,7 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{Kind: AWSManagedControlPlaneRefKind}, + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{Kind: AWSManagedControlPlaneRefKind}, }, }, expectError: false, @@ -2422,7 +2423,7 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{Name: "aws-test-5"}, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{Name: "aws-test-5"}, }, }, expectError: false, @@ -2455,7 +2456,7 @@ func TestAWSMachineReconcilerReconcile(t *testing.T) { ownerCluster: &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1"}, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{Name: "aws-test-5"}, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{Name: "aws-test-5"}, }, }, awsCluster: &infrav1.AWSCluster{ObjectMeta: metav1.ObjectMeta{Name: "aws-test-5"}}, @@ -2541,21 +2542,21 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi ownerCluster := &clusterv1.Cluster{ ObjectMeta: metav1.ObjectMeta{Name: "capi-test-1", Namespace: ns}, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "AWSCluster", - Name: "capi-test-1", // assuming same name - Namespace: ns, - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + Name: "capi-test-1", // assuming same name + APIGroup: infrav1.GroupVersion.Group, }, - ControlPlaneRef: &corev1.ObjectReference{ - Kind: "KubeadmControlPlane", - Namespace: cp.Namespace, - Name: cp.Name, - APIVersion: kubeadmv1beta1.GroupVersion.String(), + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Kind: "KubeadmControlPlane", + Name: cp.Name, + APIGroup: kubeadmv1beta1.GroupVersion.Group, }, }, Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, } @@ -2647,7 +2648,7 @@ func TestAWSMachineReconcilerReconcileDefaultsToLoadBalancerTypeClassic(t *testi }, }, Status: infrav1.AWSMachineStatus{ - Conditions: clusterv1.Conditions{ + Conditions: clusterv1beta1.Conditions{ { Type: "Paused", Status: corev1.ConditionFalse, diff --git a/controllers/awsmanagedcluster_controller.go b/controllers/awsmanagedcluster_controller.go index 9804db0b15..b5f0beee88 100644 --- a/controllers/awsmanagedcluster_controller.go +++ b/controllers/awsmanagedcluster_controller.go @@ -36,7 +36,7 @@ 
import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" @@ -83,7 +83,7 @@ func (r *AWSManagedClusterReconciler) Reconcile(ctx context.Context, req ctrl.Re controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} controlPlaneRef := types.NamespacedName{ Name: cluster.Spec.ControlPlaneRef.Name, - Namespace: cluster.Spec.ControlPlaneRef.Namespace, + Namespace: cluster.Namespace, } if err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { @@ -181,8 +181,8 @@ func (r *AWSManagedClusterReconciler) managedControlPlaneToManagedCluster(_ cont } managedClusterRef := cluster.Spec.InfrastructureRef - if managedClusterRef == nil || managedClusterRef.Kind != "AWSManagedCluster" { - log.Info("InfrastructureRef is nil or not AWSManagedCluster, skipping mapping") + if !managedClusterRef.IsDefined() || managedClusterRef.Kind != "AWSManagedCluster" { + log.Info("InfrastructureRef is not defined or not AWSManagedCluster, skipping mapping") return nil } @@ -190,7 +190,7 @@ func (r *AWSManagedClusterReconciler) managedControlPlaneToManagedCluster(_ cont { NamespacedName: types.NamespacedName{ Name: managedClusterRef.Name, - Namespace: managedClusterRef.Namespace, + Namespace: cluster.Namespace, }, }, } diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go index f754583667..5a7d53f46c 100644 --- a/controllers/helpers_test.go +++ b/controllers/helpers_test.go @@ -36,8 +36,8 @@ import ( elbService "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/elb" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const DNSName = "www.google.com" diff --git a/controllers/rosacluster_controller.go b/controllers/rosacluster_controller.go index 5a5da2214d..7dd592d2bb 100644 --- a/controllers/rosacluster_controller.go +++ b/controllers/rosacluster_controller.go @@ -22,7 +22,6 @@ import ( cmv1 "github.com/openshift-online/ocm-sdk-go/clustersmgmt/v1" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -48,8 +47,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + expclusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" @@ -111,7 +110,7 @@ func (r *ROSAClusterReconciler) Reconcile(ctx context.Context, req ctrl.Request) controlPlane := &rosacontrolplanev1.ROSAControlPlane{} controlPlaneRef := types.NamespacedName{ Name: cluster.Spec.ControlPlaneRef.Name, - Namespace: cluster.Spec.ControlPlaneRef.Namespace, + Namespace: cluster.Namespace, } if 
err := r.Get(ctx, controlPlaneRef, controlPlane); err != nil { @@ -222,8 +221,8 @@ func (r *ROSAClusterReconciler) rosaControlPlaneToManagedCluster(log *logger.Log } rosaClusterRef := cluster.Spec.InfrastructureRef - if rosaClusterRef == nil || rosaClusterRef.Kind != "ROSACluster" { - log.Info("InfrastructureRef is nil or not ROSACluster, skipping mapping") + if !rosaClusterRef.IsDefined() || rosaClusterRef.Kind != "ROSACluster" { + log.Info("InfrastructureRef is not defined or not ROSACluster, skipping mapping") return nil } @@ -231,7 +230,7 @@ func (r *ROSAClusterReconciler) rosaControlPlaneToManagedCluster(log *logger.Log { NamespacedName: types.NamespacedName{ Name: rosaClusterRef.Name, - Namespace: rosaClusterRef.Namespace, + Namespace: cluster.Namespace, }, }, } @@ -299,10 +298,10 @@ func (r *ROSAClusterReconciler) buildROSAMachinePool(nodePoolName string, cluste Bootstrap: clusterv1.Bootstrap{ DataSecretName: ptr.To(string("")), }, - InfrastructureRef: corev1.ObjectReference{ - APIVersion: expinfrav1.GroupVersion.String(), - Kind: "ROSAMachinePool", - Name: rosaMachinePool.Name, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: expinfrav1.GroupVersion.Group, + Kind: "ROSAMachinePool", + Name: rosaMachinePool.Name, }, }, }, diff --git a/controllers/rosacluster_controller_test.go b/controllers/rosacluster_controller_test.go index e7e5cfbe58..490a92f62a 100644 --- a/controllers/rosacluster_controller_test.go +++ b/controllers/rosacluster_controller_test.go @@ -30,6 +30,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -43,8 +44,9 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + expclusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -145,19 +147,17 @@ func TestRosaClusterReconcile(t *testing.T) { UID: types.UID("capi-cluster-1"), }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Name: rosaCluster.Name, - Kind: "ROSACluster", - APIVersion: expinfrav1.GroupVersion.String(), - Namespace: ns.Name, + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaCluster.Name, + Kind: "ROSACluster", + APIGroup: expinfrav1.GroupVersion.Group, }, - ControlPlaneRef: &corev1.ObjectReference{ - Name: rosaControlPlane.Name, - Kind: "ROSAControlPlane", - APIVersion: rosacontrolplanev1.GroupVersion.String(), - Namespace: ns.Name, + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaControlPlane.Name, + Kind: "ROSAControlPlane", + APIGroup: rosacontrolplanev1.GroupVersion.Group, }, - Paused: false, + Paused: ptr.To(false), }, } @@ -186,11 +186,11 @@ func TestRosaClusterReconcile(t *testing.T) { // set rosaCluster pause conditions rosaClsPatch, err := patch.NewHelper(rosaCluster, testEnv) - rosaCluster.Status.Conditions = clusterv1.Conditions{ - clusterv1.Condition{ - Type: clusterv1.PausedV1Beta2Condition, + rosaCluster.Status.Conditions = clusterv1beta1.Conditions{ + clusterv1beta1.Condition{ + Type: clusterv1beta1.PausedV1Beta2Condition, Status: 
corev1.ConditionFalse, - Reason: clusterv1.NotPausedV1Beta2Reason, + Reason: clusterv1beta1.NotPausedV1Beta2Reason, Message: "", }, } @@ -199,11 +199,11 @@ func TestRosaClusterReconcile(t *testing.T) { // set capiCluster pause condition clsPatch, err := patch.NewHelper(capiCluster, testEnv) - capiCluster.Status.Conditions = clusterv1.Conditions{ + capiCluster.Status.Deprecated.V1Beta1.Conditions = clusterv1.Conditions{ clusterv1.Condition{ - Type: clusterv1.PausedV1Beta2Condition, + Type: clusterv1beta1.PausedV1Beta2Condition, Status: corev1.ConditionFalse, - Reason: clusterv1.NotPausedV1Beta2Reason, + Reason: clusterv1beta1.NotPausedV1Beta2Reason, Message: "", }, } diff --git a/controllers/suite_test.go b/controllers/suite_test.go index b48a95dfbf..e7ee34cc64 100644 --- a/controllers/suite_test.go +++ b/controllers/suite_test.go @@ -31,9 +31,9 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" kubeadmv1beta1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" - expclusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + expclusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( diff --git a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go index c892288bae..2e7114afec 100644 --- a/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta1/awsmanagedcontrolplane_types.go @@ -21,7 +21,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) const ( // ManagedControlPlaneFinalizer allows the controller to clean up resources on delete. @@ -243,7 +245,7 @@ type AWSManagedControlPlaneStatus struct { Network infrav1.NetworkStatus `json:"networkStatus,omitempty"` // FailureDomains specifies a list fo available availability zones that can be used // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains clusterv1beta1.FailureDomains `json:"failureDomains,omitempty"` // Bastion holds details of the instance that is used as a bastion jump box // +optional Bastion *infrav1.Instance `json:"bastion,omitempty"` @@ -267,7 +269,7 @@ type AWSManagedControlPlaneStatus struct { // +optional FailureMessage *string `json:"failureMessage,omitempty"` // Conditions specifies the cpnditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // Addons holds the current status of the EKS addons // +optional Addons []AddonState `json:"addons,omitempty"` @@ -307,12 +309,12 @@ type AWSManagedControlPlaneList struct { } // GetConditions returns the control planes conditions. -func (r *AWSManagedControlPlane) GetConditions() clusterv1.Conditions { +func (r *AWSManagedControlPlane) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the status conditions for the AWSManagedControlPlane. 
-func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/controlplane/eks/api/v1beta1/conditions_consts.go b/controlplane/eks/api/v1beta1/conditions_consts.go index 930d2c92c9..971a778e33 100644 --- a/controlplane/eks/api/v1beta1/conditions_consts.go +++ b/controlplane/eks/api/v1beta1/conditions_consts.go @@ -16,44 +16,45 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + const ( // EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane. - EKSControlPlaneReadyCondition clusterv1.ConditionType = "EKSControlPlaneReady" + EKSControlPlaneReadyCondition clusterv1beta1.ConditionType = "EKSControlPlaneReady" // EKSControlPlaneCreatingCondition condition reports on whether the eks // control plane is creating. - EKSControlPlaneCreatingCondition clusterv1.ConditionType = "EKSControlPlaneCreating" + EKSControlPlaneCreatingCondition clusterv1beta1.ConditionType = "EKSControlPlaneCreating" // EKSControlPlaneUpdatingCondition condition reports on whether the eks // control plane is updating. - EKSControlPlaneUpdatingCondition clusterv1.ConditionType = "EKSControlPlaneUpdating" + EKSControlPlaneUpdatingCondition clusterv1beta1.ConditionType = "EKSControlPlaneUpdating" // EKSControlPlaneReconciliationFailedReason used to report failures while reconciling EKS control plane. EKSControlPlaneReconciliationFailedReason = "EKSControlPlaneReconciliationFailed" ) const ( // IAMControlPlaneRolesReadyCondition condition reports on the successful reconciliation of eks control plane iam roles. - IAMControlPlaneRolesReadyCondition clusterv1.ConditionType = "IAMControlPlaneRolesReady" + IAMControlPlaneRolesReadyCondition clusterv1beta1.ConditionType = "IAMControlPlaneRolesReady" // IAMControlPlaneRolesReconciliationFailedReason used to report failures while reconciling EKS control plane iam roles. IAMControlPlaneRolesReconciliationFailedReason = "IAMControlPlaneRolesReconciliationFailed" ) const ( // IAMAuthenticatorConfiguredCondition condition reports on the successful reconciliation of aws-iam-authenticator config. - IAMAuthenticatorConfiguredCondition clusterv1.ConditionType = "IAMAuthenticatorConfigured" + IAMAuthenticatorConfiguredCondition clusterv1beta1.ConditionType = "IAMAuthenticatorConfigured" // IAMAuthenticatorConfigurationFailedReason used to report failures while reconciling the aws-iam-authenticator config. IAMAuthenticatorConfigurationFailedReason = "IAMAuthenticatorConfigurationFailed" ) const ( // EKSAddonsConfiguredCondition condition reports on the successful reconciliation of EKS addons. - EKSAddonsConfiguredCondition clusterv1.ConditionType = "EKSAddonsConfigured" + EKSAddonsConfiguredCondition clusterv1beta1.ConditionType = "EKSAddonsConfigured" // EKSAddonsConfiguredFailedReason used to report failures while reconciling the EKS addons. EKSAddonsConfiguredFailedReason = "EKSAddonsConfiguredFailed" ) const ( // EKSIdentityProviderConfiguredCondition condition reports on the successful association of identity provider config. 
- EKSIdentityProviderConfiguredCondition clusterv1.ConditionType = "EKSIdentityProviderConfigured" + EKSIdentityProviderConfiguredCondition clusterv1beta1.ConditionType = "EKSIdentityProviderConfigured" // EKSIdentityProviderConfiguredFailedReason used to report failures while reconciling the identity provider config association. EKSIdentityProviderConfiguredFailedReason = "EKSIdentityProviderConfiguredFailed" ) diff --git a/controlplane/eks/api/v1beta1/zz_generated.conversion.go b/controlplane/eks/api/v1beta1/zz_generated.conversion.go index 07c12ce9c3..adbbbe0e9a 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.conversion.go +++ b/controlplane/eks/api/v1beta1/zz_generated.conversion.go @@ -29,7 +29,8 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func init() { @@ -390,7 +391,19 @@ func autoConvert_v1beta2_AWSManagedControlPlaneSpec_To_v1beta1_AWSManagedControl func autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPlaneStatus(in *AWSManagedControlPlaneStatus, out *v1beta2.AWSManagedControlPlaneStatus, s conversion.Scope) error { out.Network = in.Network - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make(map[string]corev1beta2.FailureDomain, len(*in)) + for key, val := range *in { + (*out)[key] = corev1beta2.FailureDomain{ + Name: key, + ControlPlane: &val.ControlPlane, + Attributes: val.Attributes, + } + } + } else { + out.FailureDomains = nil + } out.Bastion = (*apiv1beta2.Instance)(unsafe.Pointer(in.Bastion)) if err := Convert_v1beta1_OIDCProviderStatus_To_v1beta2_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil { return err @@ -399,7 +412,7 @@ func autoConvert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedContr out.Initialized = in.Initialized out.Ready = in.Ready out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.Addons = *(*[]v1beta2.AddonState)(unsafe.Pointer(&in.Addons)) if err := Convert_v1beta1_IdentityProviderStatus_To_v1beta2_IdentityProviderStatus(&in.IdentityProviderStatus, &out.IdentityProviderStatus, s); err != nil { return err @@ -414,7 +427,18 @@ func Convert_v1beta1_AWSManagedControlPlaneStatus_To_v1beta2_AWSManagedControlPl func autoConvert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedControlPlaneStatus(in *v1beta2.AWSManagedControlPlaneStatus, out *AWSManagedControlPlaneStatus, s conversion.Scope) error { out.Network = in.Network - out.FailureDomains = *(*apiv1beta1.FailureDomains)(unsafe.Pointer(&in.FailureDomains)) + if in.FailureDomains != nil { + in, out := &in.FailureDomains, &out.FailureDomains + *out = make(corev1beta1.FailureDomains, len(*in)) + for key, val := range *in { + (*out)[key] = corev1beta1.FailureDomainSpec{ + ControlPlane: *val.ControlPlane, + Attributes: val.Attributes, + } + } + } else { + out.FailureDomains = nil + } out.Bastion = (*apiv1beta2.Instance)(unsafe.Pointer(in.Bastion)) if err := 
Convert_v1beta2_OIDCProviderStatus_To_v1beta1_OIDCProviderStatus(&in.OIDCProvider, &out.OIDCProvider, s); err != nil { return err @@ -423,7 +447,7 @@ func autoConvert_v1beta2_AWSManagedControlPlaneStatus_To_v1beta1_AWSManagedContr out.Initialized = in.Initialized out.Ready = in.Ready out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.Addons = *(*[]AddonState)(unsafe.Pointer(&in.Addons)) if err := Convert_v1beta2_IdentityProviderStatus_To_v1beta1_IdentityProviderStatus(&in.IdentityProviderStatus, &out.IdentityProviderStatus, s); err != nil { return err diff --git a/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go index 0324486959..b8b79a6d4a 100644 --- a/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go +++ b/controlplane/eks/api/v1beta1/zz_generated.deepcopy.go @@ -24,7 +24,7 @@ import ( "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -190,7 +190,7 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane in.Network.DeepCopyInto(&out.Network) if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(apiv1beta1.FailureDomains, len(*in)) + *out = make(corev1beta1.FailureDomains, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } @@ -213,7 +213,7 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go index ee5e47c3d4..5a87366fd4 100644 --- a/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go +++ b/controlplane/eks/api/v1beta2/awsmanagedcontrolplane_types.go @@ -21,7 +21,9 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) const ( // ManagedControlPlaneFinalizer allows the controller to clean up resources on delete. 
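The regenerated converters above are the one place this migration can no longer round-trip FailureDomains through a zero-copy cast: core v1beta2 renames FailureDomainSpec to FailureDomain, duplicates the map key into a new Name field, and turns ControlPlane into a *bool, so the memory layouts differ and conversion-gen falls back to an element-wise copy. A minimal sketch of that copy, using simplified stand-in types rather than the real cluster-api structs:

    package main

    import "fmt"

    // Simplified stand-ins for the real core types; only the fields relevant
    // to this patch are modelled, so this is a sketch, not the actual API.
    type failureDomainSpecV1Beta1 struct { // core/v1beta1 FailureDomainSpec
        ControlPlane bool
        Attributes   map[string]string
    }

    type failureDomainV1Beta2 struct { // core/v1beta2 FailureDomain
        Name         string // new: the map key is duplicated into the value
        ControlPlane *bool  // pointer now, so "unset" is representable
        Attributes   map[string]string
    }

    // convertUp mirrors the generated element-wise copy. Taking &val.ControlPlane
    // relies on Go 1.22+ per-iteration loop variables, as the generated code does.
    func convertUp(in map[string]failureDomainSpecV1Beta1) map[string]failureDomainV1Beta2 {
        if in == nil {
            return nil
        }
        out := make(map[string]failureDomainV1Beta2, len(in))
        for key, val := range in {
            out[key] = failureDomainV1Beta2{
                Name:         key,
                ControlPlane: &val.ControlPlane,
                Attributes:   val.Attributes,
            }
        }
        return out
    }

    func main() {
        in := map[string]failureDomainSpecV1Beta1{"us-east-1a": {ControlPlane: true}}
        for name, fd := range convertUp(in) {
            fmt.Printf("%s controlPlane=%v\n", name, *fd.ControlPlane)
        }
    }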
@@ -307,7 +309,7 @@ type AWSManagedControlPlaneStatus struct { Network infrav1.NetworkStatus `json:"networkStatus,omitempty"` // FailureDomains specifies a list of available availability zones that can be used // +optional - FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"` + FailureDomains map[string]clusterv1.FailureDomain `json:"failureDomains,omitempty"` // Bastion holds details of the instance that is used as a bastion jump box // +optional Bastion *infrav1.Instance `json:"bastion,omitempty"` @@ -331,7 +333,7 @@ type AWSManagedControlPlaneStatus struct { // +optional FailureMessage *string `json:"failureMessage,omitempty"` // Conditions specifies the conditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // Addons holds the current status of the EKS addons // +optional Addons []AddonState `json:"addons,omitempty"` @@ -374,12 +376,12 @@ type AWSManagedControlPlaneList struct { } // GetConditions returns the control planes conditions. -func (r *AWSManagedControlPlane) GetConditions() clusterv1.Conditions { +func (r *AWSManagedControlPlane) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the status conditions for the AWSManagedControlPlane. -func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (r *AWSManagedControlPlane) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/controlplane/eks/api/v1beta2/conditions_consts.go b/controlplane/eks/api/v1beta2/conditions_consts.go index b67a6280ef..d28e6953da 100644 --- a/controlplane/eks/api/v1beta2/conditions_consts.go +++ b/controlplane/eks/api/v1beta2/conditions_consts.go @@ -16,7 +16,8 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + const ( // EKSControlPlaneReadyCondition condition reports on the successful reconciliation of eks control plane. EKSControlPlaneReadyCondition clusterv1.ConditionType = "EKSControlPlaneReady" diff --git a/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go index 0e1b766d8b..aa87ceef83 100644 --- a/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go +++ b/controlplane/eks/api/v1beta2/zz_generated.deepcopy.go @@ -25,6 +25,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
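A convention worth noting before the remaining hunks: partially migrated files, like the types and deepcopy files above, now import both core API versions under distinct aliases, keeping Conditions on the v1beta1 type while newer fields such as FailureDomains already use the v1beta2 shapes. A compile-only sketch of that split (the import paths and cluster-api type names are real; ExampleStatus itself is hypothetical):

    package example

    import (
        clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
        clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
    )

    // ExampleStatus follows the same split as AWSManagedControlPlaneStatus
    // in this patch.
    type ExampleStatus struct {
        // Conditions stay on the deprecated v1beta1 type so existing
        // GetConditions/SetConditions consumers keep compiling.
        Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`

        // FailureDomains already use the v1beta2 shape, keyed by name.
        FailureDomains map[string]clusterv1.FailureDomain `json:"failureDomains,omitempty"`
    }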
@@ -195,7 +196,7 @@ func (in *AWSManagedControlPlaneStatus) DeepCopyInto(out *AWSManagedControlPlane in.Network.DeepCopyInto(&out.Network) if in.FailureDomains != nil { in, out := &in.FailureDomains, &out.FailureDomains - *out = make(v1beta1.FailureDomains, len(*in)) + *out = make(map[string]corev1beta2.FailureDomain, len(*in)) for key, val := range *in { (*out)[key] = *val.DeepCopy() } diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go index 8605228781..e7b2e14485 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go +++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -35,7 +36,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "sigs.k8s.io/controller-runtime/pkg/source" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta1" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/feature" @@ -52,9 +55,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -178,7 +182,7 @@ func (r *AWSManagedControlPlaneReconciler) SetupWithManager(ctx context.Context, if err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, awsManagedControlPlane.GroupVersionKind(), mgr.GetClient(), &ekscontrolplanev1.AWSManagedControlPlane{})), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log.GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log.GetLogger())), ); err != nil { return fmt.Errorf("failed adding a watch for ready clusters: %w", err) } @@ -258,28 +262,28 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct // Always close the scope defer func() { - applicableConditions := []clusterv1.ConditionType{ - ekscontrolplanev1.EKSControlPlaneReadyCondition, - ekscontrolplanev1.IAMControlPlaneRolesReadyCondition, - ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, - ekscontrolplanev1.EKSAddonsConfiguredCondition, - infrav1.VpcReadyCondition, - infrav1.SubnetsReadyCondition, - infrav1.ClusterSecurityGroupsReadyCondition, + applicableConditions := []clusterv1beta1.ConditionType{ + ekscontrolplanev1beta1.EKSControlPlaneReadyCondition, + ekscontrolplanev1beta1.IAMControlPlaneRolesReadyCondition, + ekscontrolplanev1beta1.IAMAuthenticatorConfiguredCondition, + ekscontrolplanev1beta1.EKSAddonsConfiguredCondition, 
+ infrav1beta1.VpcReadyCondition, + infrav1beta1.SubnetsReadyCondition, + infrav1beta1.ClusterSecurityGroupsReadyCondition, } if managedScope.VPC().IsManaged(managedScope.Name()) { applicableConditions = append(applicableConditions, - infrav1.InternetGatewayReadyCondition, - infrav1.NatGatewaysReadyCondition, - infrav1.RouteTablesReadyCondition, - infrav1.VpcEndpointsReadyCondition, + infrav1beta1.InternetGatewayReadyCondition, + infrav1beta1.NatGatewaysReadyCondition, + infrav1beta1.RouteTablesReadyCondition, + infrav1beta1.VpcEndpointsReadyCondition, ) if managedScope.Bastion().Enabled { - applicableConditions = append(applicableConditions, infrav1.BastionHostReadyCondition) + applicableConditions = append(applicableConditions, infrav1beta1.BastionHostReadyCondition) } if managedScope.VPC().IsIPv6Enabled() { - applicableConditions = append(applicableConditions, infrav1.EgressOnlyInternetGatewayReadyCondition) + applicableConditions = append(applicableConditions, infrav1beta1.EgressOnlyInternetGatewayReadyCondition) } } @@ -302,7 +306,7 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, managedScope *scope.ManagedControlPlaneScope) (res ctrl.Result, reterr error) { managedScope.Info("Reconciling AWSManagedControlPlane") - if managedScope.Cluster.Spec.InfrastructureRef == nil { + if !managedScope.Cluster.Spec.InfrastructureRef.IsDefined() { managedScope.Info("InfrastructureRef not set, skipping reconciliation") return ctrl.Result{}, nil } @@ -312,7 +316,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, // infrastructureRef and controlplaneRef. if managedScope.Cluster.Spec.InfrastructureRef.Kind != awsManagedControlPlaneKind { // Wait for the cluster infrastructure to be ready before creating machines - if !managedScope.Cluster.Status.InfrastructureReady { + if !ptr.Deref(managedScope.Cluster.Status.Initialization.InfrastructureProvisioned, false) { managedScope.Info("Cluster infrastructure is not ready yet") return ctrl.Result{RequeueAfter: r.WaitInfraPeriod}, nil } @@ -339,12 +343,12 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } if err := sgService.ReconcileSecurityGroups(); err != nil { - conditions.MarkFalse(awsManagedControlPlane, infrav1.ClusterSecurityGroupsReadyCondition, infrav1.ClusterSecurityGroupReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(awsManagedControlPlane, infrav1beta1.ClusterSecurityGroupsReadyCondition, infrav1beta1.ClusterSecurityGroupReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile general security groups for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name) } if err := ec2Service.ReconcileBastion(); err != nil { - conditions.MarkFalse(awsManagedControlPlane, infrav1.BastionHostReadyCondition, infrav1.BastionHostFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(awsManagedControlPlane, infrav1beta1.BastionHostReadyCondition, infrav1beta1.BastionHostFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, fmt.Errorf("failed to reconcile bastion host for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) } @@ -353,7 +357,7 @@ func (r *AWSManagedControlPlaneReconciler)
reconcileNormal(ctx context.Context, } if err := awsnodeService.ReconcileCNI(ctx); err != nil { - conditions.MarkFalse(managedScope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(managedScope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, infrav1beta1.SecondaryCidrReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, fmt.Errorf("failed to reconcile control plane for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err) } @@ -369,14 +373,14 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context, } } if err := authService.ReconcileIAMAuthenticator(ctx); err != nil { - conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1beta1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1beta1.IAMAuthenticatorConfigurationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile aws-iam-authenticator config for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name) } - conditions.MarkTrue(awsManagedControlPlane, ekscontrolplanev1.IAMAuthenticatorConfiguredCondition) + conditions.MarkTrue(awsManagedControlPlane, ekscontrolplanev1beta1.IAMAuthenticatorConfiguredCondition) for _, subnet := range managedScope.Subnets().FilterPrivate() { - managedScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomainSpec{ - ControlPlane: true, + managedScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomain{ + ControlPlane: ptr.To(true), }) } @@ -451,8 +455,8 @@ func (r *AWSManagedControlPlaneReconciler) ClusterToAWSManagedControlPlane(o cli } controlPlaneRef := c.Spec.ControlPlaneRef - if controlPlaneRef != nil && controlPlaneRef.Kind == awsManagedControlPlaneKind { - return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: controlPlaneRef.Namespace, Name: controlPlaneRef.Name}}} + if controlPlaneRef.Kind == awsManagedControlPlaneKind { + return []ctrl.Request{{NamespacedName: client.ObjectKey{Namespace: c.Namespace, Name: controlPlaneRef.Name}}} } return nil @@ -522,8 +526,8 @@ func (r *AWSManagedControlPlaneReconciler) managedClusterToManagedControlPlane(_ } controlPlaneRef := cluster.Spec.ControlPlaneRef - if controlPlaneRef == nil || controlPlaneRef.Kind != awsManagedControlPlaneKind { - log.Debug("ControlPlaneRef is nil or not AWSManagedControlPlane, skipping mapping") + if controlPlaneRef.Kind != awsManagedControlPlaneKind { + log.Debug("ControlPlaneRef is not defined or not AWSManagedControlPlane, skipping mapping") return nil } @@ -531,7 +535,7 @@ func (r *AWSManagedControlPlaneReconciler) managedClusterToManagedControlPlane(_ { NamespacedName: types.NamespacedName{ Name: controlPlaneRef.Name, - Namespace: controlPlaneRef.Namespace, + Namespace: cluster.Namespace, }, }, } diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go index 2e24b8dc37..ce4e74aa19 100644 --- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go +++
b/controlplane/eks/controllers/awsmanagedcontrolplane_controller_test.go @@ -39,6 +39,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" + "k8s.io/utils/ptr" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -56,7 +57,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/securitygroup" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" ) @@ -138,7 +139,8 @@ func TestAWSManagedControlPlaneReconcilerIntegrationTests(t *testing.T) { mockedEKSCluster(ctx, g, eksMock.EXPECT(), iamMock.EXPECT(), ec2Mock.EXPECT(), stsMock.EXPECT(), awsNodeMock.EXPECT(), kubeProxyMock.EXPECT(), iamAuthenticatorMock.EXPECT()) g.Expect(testEnv.Create(ctx, &cluster)).To(Succeed()) - cluster.Status.InfrastructureReady = true + cluster.Status.Initialization.InfrastructureProvisioned = ptr.To(true) + g.Expect(testEnv.Client.Status().Update(ctx, &cluster)).To(Succeed()) g.Expect(testEnv.Create(ctx, &awsManagedCluster)).To(Succeed()) g.Expect(testEnv.Create(ctx, &awsManagedControlPlane)).To(Succeed()) @@ -158,7 +160,7 @@ func TestAWSManagedControlPlaneReconcilerIntegrationTests(t *testing.T) { // patch the paused condition awsManagedControlPlanePatcher, err := patch.NewHelper(&awsManagedControlPlane, testEnv) - awsManagedControlPlane.Status.Conditions = clusterv1.Conditions{ + awsManagedControlPlane.Status.Conditions = clusterv1beta1.Conditions{ { Type: "Paused", Status: corev1.ConditionFalse, @@ -465,7 +467,8 @@ func mockedCallsForMissingEverything(ec2Rec *mocks.MockEC2APIMockRecorder, subne Name: aws.String("tag-key"), Values: []string{"sigs.k8s.io/cluster-api-provider-aws/cluster/test-cluster"}, }, - }})).Return(&ec2.DescribeRouteTablesOutput{ + }, + })).Return(&ec2.DescribeRouteTablesOutput{ RouteTables: []ec2types.RouteTable{ { Routes: []ec2types.Route{ @@ -525,7 +528,8 @@ func mockedCallsForMissingEverything(ec2Rec *mocks.MockEC2APIMockRecorder, subne Name: aws.String("state"), Values: []string{string(ec2types.VpcStatePending), string(ec2types.VpcStateAvailable)}, }, - }}), gomock.Any()).Return(&ec2.DescribeNatGatewaysOutput{}, nil).MinTimes(1).MaxTimes(2) + }, + }), gomock.Any()).Return(&ec2.DescribeNatGatewaysOutput{}, nil).MinTimes(1).MaxTimes(2) ec2Rec.DescribeAddresses(context.TODO(), gomock.Eq(&ec2.DescribeAddressesInput{ Filters: []ec2types.Filter{ diff --git a/controlplane/eks/controllers/helpers_test.go b/controlplane/eks/controllers/helpers_test.go index 6b403ac6d9..31cca3d7eb 100644 --- a/controlplane/eks/controllers/helpers_test.go +++ b/controlplane/eks/controllers/helpers_test.go @@ -16,7 +16,6 @@ limitations under the License. 
package controllers import ( - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -25,7 +24,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) func getAWSManagedControlPlaneScope(cluster *clusterv1.Cluster, awsManagedControlPlane *ekscontrolplanev1.AWSManagedControlPlane) *scope.ManagedControlPlaneScope { scope, err := scope.NewManagedControlPlaneScope( @@ -49,17 +49,15 @@ func getManagedClusterObjects(name, namespace string) (clusterv1.Cluster, infrav UID: "1", }, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{ - APIVersion: ekscontrolplanev1.GroupVersion.String(), + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: ekscontrolplanev1.GroupVersion.Group, Name: name, - Kind: "AWSManagedControlPlane", - Namespace: namespace, + Kind: "AWSManagedControlPlane", }, - InfrastructureRef: &corev1.ObjectReference{ - APIVersion: infrav1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + APIGroup: infrav1.GroupVersion.Group, Name: name, - Kind: "AWSManagedCluster", - Namespace: namespace, + Kind: "AWSManagedCluster", }, }, } diff --git a/controlplane/eks/controllers/suite_test.go b/controlplane/eks/controllers/suite_test.go index 827081a258..360f4fbf2c 100644 --- a/controlplane/eks/controllers/suite_test.go +++ b/controlplane/eks/controllers/suite_test.go @@ -29,7 +29,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) var ( testEnv *helpers.TestEnvironment diff --git a/controlplane/rosa/api/v1beta1/conditions_consts.go b/controlplane/rosa/api/v1beta1/conditions_consts.go new file mode 100644 index 0000000000..3fb3be26cd --- /dev/null +++ b/controlplane/rosa/api/v1beta1/conditions_consts.go @@ -0,0 +1,51 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + +const ( + // ROSAControlPlaneReadyCondition condition reports on the successful reconciliation of ROSAControlPlane. + ROSAControlPlaneReadyCondition clusterv1beta1.ConditionType = "ROSAControlPlaneReady" + + // ROSAControlPlaneValidCondition condition reports whether ROSAControlPlane configuration is valid.
+ ROSAControlPlaneValidCondition clusterv1beta1.ConditionType = "ROSAControlPlaneValid" + + // ROSAControlPlaneUpgradingCondition condition reports whether ROSAControlPlane is upgrading or not. + ROSAControlPlaneUpgradingCondition clusterv1beta1.ConditionType = "ROSAControlPlaneUpgrading" + + // ExternalAuthConfiguredCondition condition reports whether external auth has been correctly configured. + ExternalAuthConfiguredCondition clusterv1beta1.ConditionType = "ExternalAuthConfigured" + + // ROSARoleConfigReadyCondition condition reports whether the referenced RosaRoleConfig is ready. + ROSARoleConfigReadyCondition clusterv1beta1.ConditionType = "ROSARoleConfigReady" + + // ReconciliationFailedReason used to report reconciliation failures. + ReconciliationFailedReason = "ReconciliationFailed" + + // ROSAControlPlaneDeletionFailedReason used to report failures while deleting ROSAControlPlane. + ROSAControlPlaneDeletionFailedReason = "DeletionFailed" + + // ROSAControlPlaneInvalidConfigurationReason used to report invalid user input. + ROSAControlPlaneInvalidConfigurationReason = "InvalidConfiguration" + + // ROSARoleConfigNotReadyReason used to report when referenced RosaRoleConfig is not ready. + ROSARoleConfigNotReadyReason = "ROSARoleConfigNotReady" + + // ROSARoleConfigNotFoundReason used to report when referenced RosaRoleConfig is not found. + ROSARoleConfigNotFoundReason = "ROSARoleConfigNotFound" +) diff --git a/controlplane/rosa/api/v1beta2/conditions_consts.go b/controlplane/rosa/api/v1beta2/conditions_consts.go index 6fd812198a..5a47112649 100644 --- a/controlplane/rosa/api/v1beta2/conditions_consts.go +++ b/controlplane/rosa/api/v1beta2/conditions_consts.go @@ -16,7 +16,8 @@ limitations under the License. package v1beta2 -import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + const ( // ROSAControlPlaneReadyCondition condition reports on the successful reconciliation of ROSAControlPlane. ROSAControlPlaneReadyCondition clusterv1.ConditionType = "ROSAControlPlaneReady" diff --git a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go index 3ae4eff314..360d2ce2e6 100644 --- a/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go +++ b/controlplane/rosa/api/v1beta2/rosacontrolplane_types.go @@ -21,7 +21,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // RosaEndpointAccessType specifies the publishing scope of cluster endpoints. @@ -806,7 +807,7 @@ type RosaControlPlaneStatus struct { // +optional FailureMessage *string `json:"failureMessage,omitempty"` // Conditions specifies the conditions for the managed control plane - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // ID is the cluster ID given by ROSA. ID string `json:"id,omitempty"` @@ -850,12 +851,12 @@ type ROSAControlPlaneList struct { } // GetConditions returns the control planes conditions. -func (r *ROSAControlPlane) GetConditions() clusterv1.Conditions { +func (r *ROSAControlPlane) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } // SetConditions sets the status conditions for the AWSManagedControlPlane.
-func (r *ROSAControlPlane) SetConditions(conditions clusterv1.Conditions) { +func (r *ROSAControlPlane) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller.go b/controlplane/rosa/controllers/rosacontrolplane_controller.go index 187365e7b6..c0a311720b 100644 --- a/controlplane/rosa/controllers/rosacontrolplane_controller.go +++ b/controlplane/rosa/controllers/rosacontrolplane_controller.go @@ -55,7 +55,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/source" + rosacontrolplanev1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta1" rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/annotations" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" @@ -65,9 +67,10 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/predicates" "sigs.k8s.io/cluster-api/util/secret" @@ -108,7 +111,6 @@ func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c WithOptions(options). WithEventFilter(predicates.ResourceHasFilterLabel(mgr.GetScheme(), log.GetLogger(), r.WatchFilterValue)). Build(r) - if err != nil { return fmt.Errorf("failed setting up the ROSAControlPlane controller manager: %w", err) } @@ -116,7 +118,7 @@ func (r *ROSAControlPlaneReconciler) SetupWithManager(ctx context.Context, mgr c if err = c.Watch( source.Kind[client.Object](mgr.GetCache(), &clusterv1.Cluster{}, handler.EnqueueRequestsFromMapFunc(util.ClusterToInfrastructureMapFunc(ctx, rosaControlPlane.GroupVersionKind(), mgr.GetClient(), &expinfrav1.ROSACluster{})), - predicates.ClusterPausedTransitionsOrInfrastructureReady(mgr.GetScheme(), log.GetLogger())), + predicates.ClusterPausedTransitionsOrInfrastructureProvisioned(mgr.GetScheme(), log.GetLogger())), ); err != nil { return fmt.Errorf("failed adding a watch for ready clusters: %w", err) } @@ -239,12 +241,12 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc return ctrl.Result{}, fmt.Errorf("failed to validate ROSAControlPlane.spec: %w", err) } - conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneValidCondition) + conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSAControlPlaneValidCondition) if validationMessage != "" { conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneValidCondition, - rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, - clusterv1.ConditionSeverityError, + rosacontrolplanev1beta1.ROSAControlPlaneValidCondition, + rosacontrolplanev1beta1.ROSAControlPlaneInvalidConfigurationReason, + clusterv1beta1.ConditionSeverityError, "%s", validationMessage) // dont' requeue because input is invalid and manual intervention is needed. 
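Note the import swap in this controller: the condition helpers now come from sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions, which keeps the old Getter/Setter contract while the primary util/conditions package moves on to metav1.Condition. A self-contained sketch of the call pattern, assuming the deprecated package retains the familiar MarkTrue/MarkFalse signatures; the toy type below is hypothetical and exists only to satisfy the Setter interface:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
        "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
    )

    // toy satisfies conditions.Setter: client.Object plus condition accessors.
    type toy struct {
        metav1.TypeMeta
        metav1.ObjectMeta
        conds clusterv1beta1.Conditions
    }

    func (t *toy) GetConditions() clusterv1beta1.Conditions  { return t.conds }
    func (t *toy) SetConditions(c clusterv1beta1.Conditions) { t.conds = c }
    func (t *toy) DeepCopyObject() runtime.Object            { c := *t; return &c } // shallow copy; enough for a sketch

    func main() {
        obj := &toy{}
        // Same shape as the MarkFalse calls in this patch: condition type,
        // reason, v1beta1 severity, then a printf-style message.
        conditions.MarkFalse(obj,
            clusterv1beta1.ConditionType("ROSAControlPlaneValid"),
            "InvalidConfiguration",
            clusterv1beta1.ConditionSeverityError,
            "%s", "spec.version is not supported")
        fmt.Println(obj.GetConditions()[0].Type, obj.GetConditions()[0].Status)
    }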
@@ -266,7 +268,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc switch cluster.Status().State() { case cmv1.ClusterStateReady: - conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneReadyCondition) + conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition) rosaScope.ControlPlane.Status.Ready = true apiEndpoint, err := buildAPIEndpoint(cluster) @@ -300,9 +302,9 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc rosaScope.ControlPlane.Status.FailureMessage = &errorMessage conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneReadyCondition, + rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition, string(cluster.Status().State()), - clusterv1.ConditionSeverityError, + clusterv1beta1.ConditionSeverityError, "%s", cluster.Status().ProvisionErrorCode()) // Cluster is in an unrecoverable state, returning nil error so that the request doesn't get requeued. @@ -310,9 +312,9 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc } conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneReadyCondition, + rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition, string(cluster.Status().State()), - clusterv1.ConditionSeverityInfo, + clusterv1beta1.ConditionSeverityInfo, "%s", cluster.Status().Description()) @@ -335,7 +337,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc } // Is the referenced ROSANetwork ready yet? - if !conditions.IsTrue(rosaNet, expinfrav1.ROSANetworkReadyCondition) { + if !conditions.IsTrue(rosaNet, expinfrav1beta1.ROSANetworkReadyCondition) { rosaScope.Info(fmt.Sprintf("referenced ROSANetwork %s is not ready", rosaNet.Name)) return ctrl.Result{RequeueAfter: time.Minute}, nil } @@ -349,9 +351,9 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc cluster, err = ocmClient.CreateCluster(ocmClusterSpec) if err != nil { conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneReadyCondition, - rosacontrolplanev1.ReconciliationFailedReason, - clusterv1.ConditionSeverityError, + rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition, + rosacontrolplanev1beta1.ReconciliationFailedReason, + clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to create OCM cluster: %w", err) @@ -375,25 +377,25 @@ func (r *ROSAControlPlaneReconciler) reconcileRosaRoleConfig(ctx context.Context if err := r.Client.Get(ctx, key, rosaRoleConfig); err != nil { conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSARoleConfigReadyCondition, - rosacontrolplanev1.ROSARoleConfigNotFoundReason, - clusterv1.ConditionSeverityError, + rosacontrolplanev1beta1.ROSARoleConfigReadyCondition, + rosacontrolplanev1beta1.ROSARoleConfigNotFoundReason, + clusterv1beta1.ConditionSeverityError, "Failed to get RosaRoleConfig %s/%s", rosaScope.ControlPlane.Namespace, rosaScope.ControlPlane.Spec.RosaRoleConfigRef.Name) return nil, err } // Check if RosaRoleConfig is ready - if !conditions.IsTrue(rosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition) { + if !conditions.IsTrue(rosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition) { conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSARoleConfigReadyCondition, - rosacontrolplanev1.ROSARoleConfigNotReadyReason, - clusterv1.ConditionSeverityWarning, + 
rosacontrolplanev1beta1.ROSARoleConfigReadyCondition, + rosacontrolplanev1beta1.ROSARoleConfigNotReadyReason, + clusterv1beta1.ConditionSeverityWarning, "RosaRoleConfig %s/%s is not ready", rosaScope.ControlPlane.Namespace, rosaScope.ControlPlane.Spec.RosaRoleConfigRef.Name) return nil, fmt.Errorf("RosaRoleConfig %s/%s is not ready", rosaScope.ControlPlane.Namespace, rosaScope.ControlPlane.Spec.RosaRoleConfigRef.Name) } - conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ROSARoleConfigReadyCondition) + conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSARoleConfigReadyCondition) } else { rosaRoleConfig.Status.OIDCID = rosaScope.ControlPlane.Spec.OIDCID rosaRoleConfig.Status.AccountRolesRef.InstallerRoleARN = rosaScope.ControlPlane.Spec.InstallerRoleARN @@ -452,9 +454,9 @@ func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaSc if cluster.Status().State() != cmv1.ClusterStateUninstalling { if _, err := ocmClient.DeleteCluster(cluster.ID(), bestEffort, creator); err != nil { conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneReadyCondition, - rosacontrolplanev1.ROSAControlPlaneDeletionFailedReason, - clusterv1.ConditionSeverityError, + rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition, + rosacontrolplanev1beta1.ROSAControlPlaneDeletionFailedReason, + clusterv1beta1.ConditionSeverityError, "failed to delete ROSAControlPlane: %s; if the error can't be resolved, set '%s' annotation to force the deletion", err.Error(), ROSAControlPlaneForceDeleteAnnotation) @@ -463,9 +465,9 @@ func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaSc } conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneReadyCondition, + rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition, string(cluster.Status().State()), - clusterv1.ConditionSeverityInfo, + clusterv1beta1.ConditionSeverityInfo, "deleting") rosaScope.ControlPlane.Status.Ready = false rosaScope.Info("waiting for cluster to be deleted") @@ -509,7 +511,7 @@ func (r *ROSAControlPlaneReconciler) deleteMachinePools(ctx context.Context, ros func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.ROSAControlPlaneScope, ocmClient rosa.OCMClient, cluster *cmv1.Cluster) error { version := rosaScope.ControlPlane.Spec.Version if version == rosa.RawVersionID(cluster.Version()) { - conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSAControlPlaneUpgradingCondition, "upgraded", clusterv1beta1.ConditionSeverityInfo, "") if cluster.Version() != nil { rosaScope.ControlPlane.Status.AvailableUpgrades = cluster.Version().AvailableUpgrades() @@ -533,8 +535,8 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.RO ack := (rosaScope.ControlPlane.Spec.VersionGate == rosacontrolplanev1.Acknowledge || rosaScope.ControlPlane.Spec.VersionGate == rosacontrolplanev1.AlwaysAcknowledge) scheduledUpgrade, err = rosa.ScheduleControlPlaneUpgrade(ocmClient, cluster, version, time.Now(), ack) if err != nil { - condition := &clusterv1.Condition{ - Type: rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, + condition := &clusterv1beta1.Condition{ + Type: rosacontrolplanev1beta1.ROSAControlPlaneUpgradingCondition, Status: corev1.ConditionFalse, Reason: "failed", Message: fmt.Sprintf("failed to schedule upgrade to version %s: %v", 
version, err), @@ -545,8 +547,8 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.RO } } - condition := &clusterv1.Condition{ - Type: rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, + condition := &clusterv1beta1.Condition{ + Type: rosacontrolplanev1beta1.ROSAControlPlaneUpgradingCondition, Status: corev1.ConditionTrue, Reason: string(scheduledUpgrade.State().Value()), Message: fmt.Sprintf("Upgrading to version %s", scheduledUpgrade.Version()), @@ -569,9 +571,9 @@ func (r *ROSAControlPlaneReconciler) updateOCMCluster(rosaScope *scope.ROSAContr rosaScope.Info("Updating cluster") if err := ocmClient.UpdateCluster(cluster.ID(), creator, ocmClusterSpec); err != nil { conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ROSAControlPlaneValidCondition, - rosacontrolplanev1.ROSAControlPlaneInvalidConfigurationReason, - clusterv1.ConditionSeverityError, + rosacontrolplanev1beta1.ROSAControlPlaneValidCondition, + rosacontrolplanev1beta1.ROSAControlPlaneInvalidConfigurationReason, + clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return err @@ -673,13 +675,13 @@ func (r *ROSAControlPlaneReconciler) reconcileExternalAuth(ctx context.Context, if err := r.reconcileExternalAuthProviders(ctx, externalAuthClient, rosaScope, cluster); err != nil { errs = append(errs, err) conditions.MarkFalse(rosaScope.ControlPlane, - rosacontrolplanev1.ExternalAuthConfiguredCondition, - rosacontrolplanev1.ReconciliationFailedReason, - clusterv1.ConditionSeverityError, + rosacontrolplanev1beta1.ExternalAuthConfiguredCondition, + rosacontrolplanev1beta1.ReconciliationFailedReason, + clusterv1beta1.ConditionSeverityError, "%s", err.Error()) } else { - conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1.ExternalAuthConfiguredCondition) + conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ExternalAuthConfiguredCondition) } if err := r.reconcileExternalAuthBootstrapKubeconfig(ctx, externalAuthClient, rosaScope, cluster); err != nil { @@ -1224,8 +1226,8 @@ func (r *ROSAControlPlaneReconciler) rosaClusterToROSAControlPlane(log *logger.L } controlPlaneRef := cluster.Spec.ControlPlaneRef - if controlPlaneRef == nil || controlPlaneRef.Kind != rosaControlPlaneKind { - log.Debug("ControlPlaneRef is nil or not ROSAControlPlane, skipping mapping") + if !controlPlaneRef.IsDefined() || controlPlaneRef.Kind != rosaControlPlaneKind { + log.Debug("ControlPlaneRef is not defined or not ROSAControlPlane, skipping mapping") return nil } @@ -1233,7 +1235,7 @@ func (r *ROSAControlPlaneReconciler) rosaClusterToROSAControlPlane(log *logger.L { NamespacedName: types.NamespacedName{ Name: controlPlaneRef.Name, - Namespace: controlPlaneRef.Namespace, + Namespace: cluster.Namespace, }, }, } diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller_test.go b/controlplane/rosa/controllers/rosacontrolplane_controller_test.go index 4ce5164e07..ac64218421 100644 --- a/controlplane/rosa/controllers/rosacontrolplane_controller_test.go +++ b/controlplane/rosa/controllers/rosacontrolplane_controller_test.go @@ -53,8 +53,9 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" 
"sigs.k8s.io/cluster-api/util/patch" ) @@ -330,7 +331,8 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Name: "rosa-control-plane-1", Namespace: ns.Name, - UID: types.UID("rosa-control-plane-1")}, + UID: types.UID("rosa-control-plane-1"), + }, TypeMeta: metav1.TypeMeta{ Kind: "ROSAControlPlane", APIVersion: rosacontrolplanev1.GroupVersion.String(), @@ -372,7 +374,7 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { }, Status: rosacontrolplanev1.RosaControlPlaneStatus{ ID: "rosa-control-plane-1", - Conditions: clusterv1.Conditions{clusterv1.Condition{ + Conditions: clusterv1beta1.Conditions{clusterv1beta1.Condition{ Type: "Paused", Status: "False", Severity: "", @@ -389,10 +391,10 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { UID: types.UID("owner-cluster-1"), }, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{ - Name: rosaControlPlane.Name, - Kind: "ROSAControlPlane", - APIVersion: rosacontrolplanev1.GroupVersion.String(), + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaControlPlane.Name, + Kind: "ROSAControlPlane", + APIGroup: rosacontrolplanev1.GroupVersion.Group, }, }, } @@ -498,7 +500,7 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { g.Expect(err).ShouldNot(HaveOccurred()) rosaControlPlane.Status = rosacontrolplanev1.RosaControlPlaneStatus{ ID: "rosa-control-plane-1", - Conditions: clusterv1.Conditions{clusterv1.Condition{ + Conditions: clusterv1beta1.Conditions{clusterv1beta1.Condition{ Type: "Paused", Status: "False", Severity: "", @@ -515,7 +517,7 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) { key := client.ObjectKey{Name: rosaControlPlane.Name, Namespace: rosaControlPlane.Namespace} errGet := testEnv.Get(ctx, key, cp) g.Expect(errGet).NotTo(HaveOccurred()) - oldCondition := conditions.Get(cp, clusterv1.PausedV1Beta2Condition) + oldCondition := conditions.Get(cp, clusterv1beta1.PausedV1Beta2Condition) g.Expect(oldCondition).NotTo(BeNil()) r := ROSAControlPlaneReconciler{ diff --git a/controlplane/rosa/controllers/suite_test.go b/controlplane/rosa/controllers/suite_test.go index 83a5b0e232..d0b5c1c9e5 100644 --- a/controlplane/rosa/controllers/suite_test.go +++ b/controlplane/rosa/controllers/suite_test.go @@ -32,7 +32,8 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) var ( testEnv *helpers.TestEnvironment diff --git a/exp/api/v1beta1/awsfargateprofile_types.go b/exp/api/v1beta1/awsfargateprofile_types.go index e9a76808ce..66eabc4fbb 100644 --- a/exp/api/v1beta1/awsfargateprofile_types.go +++ b/exp/api/v1beta1/awsfargateprofile_types.go @@ -23,15 +23,14 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") - -var ( - // DefaultEKSFargateRole is the name of the default IAM role to use for fargate - // profiles if no other role is supplied in the spec and if iam role creation - // is not enabled. The default can be created using clusterawsadm or created manually. 
- DefaultEKSFargateRole = fmt.Sprintf("eks-fargate%s", iamv1.DefaultNameSuffix) + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) +// DefaultEKSFargateRole is the name of the default IAM role to use for fargate +// profiles if no other role is supplied in the spec and if iam role creation +// is not enabled. The default can be created using clusterawsadm or created manually. +var DefaultEKSFargateRole = fmt.Sprintf("eks-fargate%s", iamv1.DefaultNameSuffix) + // FargateProfileSpec defines the desired state of FargateProfile. type FargateProfileSpec struct { // ClusterName is the name of the Cluster this object belongs to. @@ -117,7 +116,7 @@ type FargateProfileStatus struct { // Conditions defines current state of the Fargate profile. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -138,12 +137,12 @@ type AWSFargateProfile struct { } // GetConditions returns the observations of the operational state of the AWSFargateProfile resource. -func (r *AWSFargateProfile) GetConditions() clusterv1.Conditions { +func (r *AWSFargateProfile) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1.Conditions. -func (r *AWSFargateProfile) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1beta1.Conditions. +func (r *AWSFargateProfile) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/awsmachinepool_types.go b/exp/api/v1beta1/awsmachinepool_types.go index 94433c9950..c4061baf12 100644 --- a/exp/api/v1beta1/awsmachinepool_types.go +++ b/exp/api/v1beta1/awsmachinepool_types.go @@ -21,7 +21,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) // Constants block. const ( @@ -115,7 +116,7 @@ type AWSMachinePoolStatus struct { // Conditions defines current service state of the AWSMachinePool. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // Instances contains the status for each instance in the pool // +optional @@ -214,12 +215,12 @@ func init() { } // GetConditions returns the observations of the operational state of the AWSMachinePool resource. -func (r *AWSMachinePool) GetConditions() clusterv1.Conditions { +func (r *AWSMachinePool) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1.Conditions. -func (r *AWSMachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1beta1.Conditions.
+func (r *AWSMachinePool) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/awsmanagedmachinepool_types.go b/exp/api/v1beta1/awsmanagedmachinepool_types.go index f48003648f..08fdd1d0c9 100644 --- a/exp/api/v1beta1/awsmanagedmachinepool_types.go +++ b/exp/api/v1beta1/awsmanagedmachinepool_types.go @@ -23,7 +23,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) // ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool. type ManagedMachineAMIType string @@ -51,12 +52,10 @@ const ( ManagedMachinePoolCapacityTypeSpot ManagedMachinePoolCapacityType = "spot" ) -var ( - // DefaultEKSNodegroupRole is the name of the default IAM role to use for EKS nodegroups - // if no other role is supplied in the spec and if iam role creation is not enabled. The default - // can be created using clusterawsadm or created manually. - DefaultEKSNodegroupRole = fmt.Sprintf("eks-nodegroup%s", iamv1.DefaultNameSuffix) -) +// DefaultEKSNodegroupRole is the name of the default IAM role to use for EKS nodegroups +// if no other role is supplied in the spec and if iam role creation is not enabled. The default +// can be created using clusterawsadm or created manually. +var DefaultEKSNodegroupRole = fmt.Sprintf("eks-nodegroup%s", iamv1.DefaultNameSuffix) // AWSManagedMachinePoolSpec defines the desired state of AWSManagedMachinePool. type AWSManagedMachinePoolSpec struct { @@ -232,7 +231,7 @@ type AWSManagedMachinePoolStatus struct { // Conditions defines current service state of the managed machine pool // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -252,12 +251,12 @@ type AWSManagedMachinePool struct { } // GetConditions returns the observations of the operational state of the AWSManagedMachinePool resource. -func (r *AWSManagedMachinePool) GetConditions() clusterv1.Conditions { +func (r *AWSManagedMachinePool) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1.Conditions. -func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1beta1.Conditions. +func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta1/conditions_consts.go b/exp/api/v1beta1/conditions_consts.go index 9f667939fa..1dcd9a20c5 100644 --- a/exp/api/v1beta1/conditions_consts.go +++ b/exp/api/v1beta1/conditions_consts.go @@ -16,10 +16,11 @@ limitations under the License. package v1beta1 -import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +import clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + const ( // ASGReadyCondition reports on current status of the autoscaling group. Ready indicates the group is provisioned. - ASGReadyCondition clusterv1.ConditionType = "ASGReady" + ASGReadyCondition clusterv1beta1.ConditionType = "ASGReady" // ASGNotFoundReason used when the autoscaling group couldn't be retrieved. 
ASGNotFoundReason = "ASGNotFound" // ASGProvisionFailedReason used for failures during autoscaling group provisioning. @@ -28,7 +29,7 @@ const ( ASGDeletionInProgress = "ASGDeletionInProgress" // LaunchTemplateReadyCondition represents the status of an AWSMachinePool's associated Launch Template. - LaunchTemplateReadyCondition clusterv1.ConditionType = "LaunchTemplateReady" + LaunchTemplateReadyCondition clusterv1beta1.ConditionType = "LaunchTemplateReady" // LaunchTemplateNotFoundReason is used when an associated Launch Template can't be found. LaunchTemplateNotFoundReason = "LaunchTemplateNotFound" // LaunchTemplateCreateFailedReason used for failures during Launch Template creation. @@ -37,17 +38,30 @@ const ( LaunchTemplateReconcileFailedReason = "LaunchTemplateReconcileFailed" // PreLaunchTemplateUpdateCheckCondition reports if all prerequisite are met for launch template update. - PreLaunchTemplateUpdateCheckCondition clusterv1.ConditionType = "PreLaunchTemplateUpdateCheckSuccess" + PreLaunchTemplateUpdateCheckCondition clusterv1beta1.ConditionType = "PreLaunchTemplateUpdateCheckSuccess" // PostLaunchTemplateUpdateOperationCondition reports on successfully completes post launch template update operation. - PostLaunchTemplateUpdateOperationCondition clusterv1.ConditionType = "PostLaunchTemplateUpdateOperationSuccess" + PostLaunchTemplateUpdateOperationCondition clusterv1beta1.ConditionType = "PostLaunchTemplateUpdateOperationSuccess" // PreLaunchTemplateUpdateCheckFailedReason used to report when not all prerequisite are met for launch template update. PreLaunchTemplateUpdateCheckFailedReason = "PreLaunchTemplateUpdateCheckFailed" // PostLaunchTemplateUpdateOperationFailedReason used to report when post launch template update operation failed. PostLaunchTemplateUpdateOperationFailedReason = "PostLaunchTemplateUpdateOperationFailed" + // AWSMachineCreationFailed reports if creating AWSMachines to represent ASG (machine pool) machines failed. + AWSMachineCreationFailed = "AWSMachineCreationFailed" + // AWSMachineDeletionFailed reports if deleting AWSMachines failed. + AWSMachineDeletionFailed = "AWSMachineDeletionFailed" + // LifecycleHookReadyCondition reports on the status of the lifecycle hook. + LifecycleHookReadyCondition clusterv1beta1.ConditionType = "LifecycleHookReady" + // LifecycleHookCreationFailedReason used for failures during lifecycle hook creation. + LifecycleHookCreationFailedReason = "LifecycleHookCreationFailed" + // LifecycleHookUpdateFailedReason used for failures during lifecycle hook update. + LifecycleHookUpdateFailedReason = "LifecycleHookUpdateFailed" + // LifecycleHookDeletionFailedReason used for failures during lifecycle hook deletion. + LifecycleHookDeletionFailedReason = "LifecycleHookDeletionFailed" + // InstanceRefreshStartedCondition reports on successfully starting instance refresh. - InstanceRefreshStartedCondition clusterv1.ConditionType = "InstanceRefreshStarted" + InstanceRefreshStartedCondition clusterv1beta1.ConditionType = "InstanceRefreshStarted" // InstanceRefreshNotReadyReason used to report instance refresh is not initiated. // If there are instance refreshes that are in progress, then a new instance refresh request will fail. InstanceRefreshNotReadyReason = "InstanceRefreshNotReady" @@ -57,7 +71,7 @@ const ( const ( // EKSNodegroupReadyCondition condition reports on the successful reconciliation of eks control plane. 
- EKSNodegroupReadyCondition clusterv1.ConditionType = "EKSNodegroupReady" + EKSNodegroupReadyCondition clusterv1beta1.ConditionType = "EKSNodegroupReady" // EKSNodegroupReconciliationFailedReason used to report failures while reconciling EKS control plane. EKSNodegroupReconciliationFailedReason = "EKSNodegroupReconciliationFailed" // WaitingForEKSControlPlaneReason used when the machine pool is waiting for @@ -67,10 +81,10 @@ const ( const ( // EKSFargateProfileReadyCondition condition reports on the successful reconciliation of eks control plane. - EKSFargateProfileReadyCondition clusterv1.ConditionType = "EKSFargateProfileReady" + EKSFargateProfileReadyCondition clusterv1beta1.ConditionType = "EKSFargateProfileReady" // EKSFargateCreatingCondition condition reports on whether the fargate // profile is creating. - EKSFargateCreatingCondition clusterv1.ConditionType = "EKSFargateCreating" + EKSFargateCreatingCondition clusterv1beta1.ConditionType = "EKSFargateCreating" // EKSFargateDeletingCondition used to report that the profile is deleting. EKSFargateDeletingCondition = "EKSFargateDeleting" // EKSFargateReconciliationFailedReason used to report failures while reconciling EKS control plane. @@ -90,14 +104,65 @@ const ( const ( // IAMNodegroupRolesReadyCondition condition reports on the successful // reconciliation of EKS nodegroup iam roles. - IAMNodegroupRolesReadyCondition clusterv1.ConditionType = "IAMNodegroupRolesReady" + IAMNodegroupRolesReadyCondition clusterv1beta1.ConditionType = "IAMNodegroupRolesReady" // IAMNodegroupRolesReconciliationFailedReason used to report failures while // reconciling EKS nodegroup iam roles. IAMNodegroupRolesReconciliationFailedReason = "IAMNodegroupRolesReconciliationFailed" // IAMFargateRolesReadyCondition condition reports on the successful // reconciliation of EKS nodegroup iam roles. - IAMFargateRolesReadyCondition clusterv1.ConditionType = "IAMFargateRolesReady" + IAMFargateRolesReadyCondition clusterv1beta1.ConditionType = "IAMFargateRolesReady" // IAMFargateRolesReconciliationFailedReason used to report failures while // reconciling EKS nodegroup iam roles. IAMFargateRolesReconciliationFailedReason = "IAMFargateRolesReconciliationFailed" ) + +const ( + // RosaMachinePoolReadyCondition condition reports on the successful reconciliation of rosa machinepool. + RosaMachinePoolReadyCondition clusterv1beta1.ConditionType = "RosaMachinePoolReady" + // RosaMachinePoolUpgradingCondition condition reports whether ROSAMachinePool is upgrading or not. + RosaMachinePoolUpgradingCondition clusterv1beta1.ConditionType = "RosaMachinePoolUpgrading" + + // WaitingForRosaControlPlaneReason used when the machine pool is waiting for + // ROSA control plane infrastructure to be ready before proceeding. + WaitingForRosaControlPlaneReason = "WaitingForRosaControlPlane" + + // RosaMachinePoolReconciliationFailedReason used to report failures while reconciling ROSAMachinePool. + RosaMachinePoolReconciliationFailedReason = "ReconciliationFailed" +) + +const ( + // ROSANetworkReadyCondition condition reports on the successful reconciliation of ROSANetwork. + ROSANetworkReadyCondition clusterv1beta1.ConditionType = "ROSANetworkReady" + + // ROSANetworkCreatingReason used when ROSANetwork is being created. + ROSANetworkCreatingReason = "Creating" + + // ROSANetworkCreatedReason used when ROSANetwork is created. + ROSANetworkCreatedReason = "Created" + + // ROSANetworkFailedReason used when rosaNetwork creation failed. 
+ ROSANetworkFailedReason = "Failed" + + // ROSANetworkDeletingReason used when ROSANetwork is being deleted. + ROSANetworkDeletingReason = "Deleting" + + // ROSANetworkDeletionFailedReason used to report failures while deleting ROSANetwork. + ROSANetworkDeletionFailedReason = "DeletionFailed" +) + +const ( + // RosaRoleConfigReadyCondition condition reports on the successful reconciliation of RosaRoleConfig. + RosaRoleConfigReadyCondition = "RosaRoleConfigReady" + + // RosaRoleConfigDeletionFailedReason used to report failures while deleting RosaRoleConfig. + RosaRoleConfigDeletionFailedReason = "DeletionFailed" + + // RosaRoleConfigReconciliationFailedReason used to report reconciliation failures. + RosaRoleConfigReconciliationFailedReason = "ReconciliationFailed" + + // RosaRoleConfigDeletionStarted used to indicate that the deletion of RosaRoleConfig has started. + RosaRoleConfigDeletionStarted = "DeletionStarted" + + // RosaRoleConfigCreatedReason used to indicate that the RosaRoleConfig has been created. + RosaRoleConfigCreatedReason = "Created" +) diff --git a/exp/api/v1beta1/zz_generated.conversion.go b/exp/api/v1beta1/zz_generated.conversion.go index 6b79c393af..f5593a2ac7 100644 --- a/exp/api/v1beta1/zz_generated.conversion.go +++ b/exp/api/v1beta1/zz_generated.conversion.go @@ -28,7 +28,7 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" v1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) func init() { @@ -587,7 +587,7 @@ func autoConvert_v1beta2_AWSMachinePoolSpec_To_v1beta1_AWSMachinePoolSpec(in *v1 func autoConvert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(in *AWSMachinePoolStatus, out *v1beta2.AWSMachinePoolStatus, s conversion.Scope) error { out.Ready = in.Ready out.Replicas = in.Replicas - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.Instances = *(*[]v1beta2.AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances)) out.LaunchTemplateID = in.LaunchTemplateID out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) @@ -605,7 +605,7 @@ func Convert_v1beta1_AWSMachinePoolStatus_To_v1beta2_AWSMachinePoolStatus(in *AW func autoConvert_v1beta2_AWSMachinePoolStatus_To_v1beta1_AWSMachinePoolStatus(in *v1beta2.AWSMachinePoolStatus, out *AWSMachinePoolStatus, s conversion.Scope) error { out.Ready = in.Ready out.Replicas = in.Replicas - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) out.Instances = *(*[]AWSMachinePoolInstanceStatus)(unsafe.Pointer(&in.Instances)) out.LaunchTemplateID = in.LaunchTemplateID out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) @@ -770,7 +770,7 @@ func autoConvert_v1beta1_AWSManagedMachinePoolStatus_To_v1beta2_AWSManagedMachin out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -786,7 +786,7 @@ func 
autoConvert_v1beta2_AWSManagedMachinePoolStatus_To_v1beta1_AWSManagedMachin out.LaunchTemplateVersion = (*string)(unsafe.Pointer(in.LaunchTemplateVersion)) out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -917,7 +917,7 @@ func autoConvert_v1beta1_FargateProfileStatus_To_v1beta2_FargateProfileStatus(in out.Ready = in.Ready out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } @@ -930,7 +930,7 @@ func autoConvert_v1beta2_FargateProfileStatus_To_v1beta1_FargateProfileStatus(in out.Ready = in.Ready out.FailureReason = (*string)(unsafe.Pointer(in.FailureReason)) out.FailureMessage = (*string)(unsafe.Pointer(in.FailureMessage)) - out.Conditions = *(*apiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) + out.Conditions = *(*corev1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) return nil } diff --git a/exp/api/v1beta1/zz_generated.deepcopy.go b/exp/api/v1beta1/zz_generated.deepcopy.go index 063e242516..3919507c2d 100644 --- a/exp/api/v1beta1/zz_generated.deepcopy.go +++ b/exp/api/v1beta1/zz_generated.deepcopy.go @@ -23,7 +23,7 @@ package v1beta1 import ( runtime "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - apiv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + corev1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -263,7 +263,7 @@ func (in *AWSMachinePoolStatus) DeepCopyInto(out *AWSMachinePoolStatus) { *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -490,7 +490,7 @@ func (in *AWSManagedMachinePoolStatus) DeepCopyInto(out *AWSManagedMachinePoolSt } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -632,7 +632,7 @@ func (in *FargateProfileStatus) DeepCopyInto(out *FargateProfileStatus) { } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make(apiv1beta1.Conditions, len(*in)) + *out = make(corev1beta1.Conditions, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/exp/api/v1beta2/awsfargateprofile_types.go b/exp/api/v1beta2/awsfargateprofile_types.go index a2660a15b5..453fd4b724 100644 --- a/exp/api/v1beta2/awsfargateprofile_types.go +++ b/exp/api/v1beta2/awsfargateprofile_types.go @@ -23,7 +23,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) var ( // DefaultEKSFargateRole is the name of the default IAM role to use for fargate @@ -141,7 +142,7 @@ type FargateProfileStatus struct { // Conditions defines current state of the Fargate profile. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -162,12 +163,12 @@ type AWSFargateProfile struct { } // GetConditions returns the observations of the operational state of the AWSFargateProfile resource. -func (r *AWSFargateProfile) GetConditions() clusterv1.Conditions { +func (r *AWSFargateProfile) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1.Conditions. -func (r *AWSFargateProfile) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSFargateProfile to the predescribed clusterv1beta1.Conditions. 
+func (r *AWSFargateProfile) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta2/awsfargateprofile_webhook.go b/exp/api/v1beta2/awsfargateprofile_webhook.go index e9f11e052e..0bd7932f7a 100644 --- a/exp/api/v1beta2/awsfargateprofile_webhook.go +++ b/exp/api/v1beta2/awsfargateprofile_webhook.go @@ -30,7 +30,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) const ( maxProfileNameLength = 100 diff --git a/exp/api/v1beta2/awsfargateprofile_webhook_test.go b/exp/api/v1beta2/awsfargateprofile_webhook_test.go index 1d83f6f46a..90aaf5f239 100644 --- a/exp/api/v1beta2/awsfargateprofile_webhook_test.go +++ b/exp/api/v1beta2/awsfargateprofile_webhook_test.go @@ -27,7 +27,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks" utildefaulting "sigs.k8s.io/cluster-api-provider-aws/v2/util/defaulting" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) func TestAWSFargateProfileDefault(t *testing.T) { fargate := &AWSFargateProfile{ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: "default"}, diff --git a/exp/api/v1beta2/awsmachinepool_types.go b/exp/api/v1beta2/awsmachinepool_types.go index aaacb0728b..a5eb28ee20 100644 --- a/exp/api/v1beta2/awsmachinepool_types.go +++ b/exp/api/v1beta2/awsmachinepool_types.go @@ -23,7 +23,8 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) // Constants block. const ( @@ -203,7 +204,7 @@ type AWSMachinePoolStatus struct { // Conditions defines current service state of the AWSMachinePool. // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // Instances contains the status for each instance in the pool // +optional @@ -305,12 +306,12 @@ func init() { } // GetConditions returns the observations of the operational state of the AWSMachinePool resource. -func (r *AWSMachinePool) GetConditions() clusterv1.Conditions { +func (r *AWSMachinePool) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1.Conditions. -func (r *AWSMachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the AWSMachinePool to the predescribed clusterv1beta1.Conditions. +func (r *AWSMachinePool) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta2/awsmanagedmachinepool_types.go b/exp/api/v1beta2/awsmanagedmachinepool_types.go index 830493cbc4..20f52b4707 100644 --- a/exp/api/v1beta2/awsmanagedmachinepool_types.go +++ b/exp/api/v1beta2/awsmanagedmachinepool_types.go @@ -23,7 +23,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" iamv1 "sigs.k8s.io/cluster-api-provider-aws/v2/iam/api/v1beta1" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) // ManagedMachineAMIType specifies which AWS AMI to use for a managed MachinePool. 
// Source of truth can be found using the link below:
@@ -293,7 +294,7 @@ type AWSManagedMachinePoolStatus struct {
 
 	// Conditions defines current service state of the managed machine pool
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -313,12 +314,12 @@ type AWSManagedMachinePool struct {
 }
 
 // GetConditions returns the observations of the operational state of the AWSManagedMachinePool resource.
-func (r *AWSManagedMachinePool) GetConditions() clusterv1.Conditions {
+func (r *AWSManagedMachinePool) GetConditions() clusterv1beta1.Conditions {
 	return r.Status.Conditions
 }
 
-// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1.Conditions.
-func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1.Conditions) {
+// SetConditions sets the underlying service state of the AWSManagedMachinePool to the predescribed clusterv1beta1.Conditions.
+func (r *AWSManagedMachinePool) SetConditions(conditions clusterv1beta1.Conditions) {
 	r.Status.Conditions = conditions
 }
diff --git a/exp/api/v1beta2/conditions_consts.go b/exp/api/v1beta2/conditions_consts.go
index 8b01761abd..a699b6a4f0 100644
--- a/exp/api/v1beta2/conditions_consts.go
+++ b/exp/api/v1beta2/conditions_consts.go
@@ -16,7 +16,8 @@ limitations under the License.
 
 package v1beta2
 
-import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+import clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+
 const (
 	// ASGReadyCondition reports on current status of the autoscaling group. Ready indicates the group is provisioned.
 	ASGReadyCondition clusterv1.ConditionType = "ASGReady"
diff --git a/exp/api/v1beta2/rosacluster_types.go b/exp/api/v1beta2/rosacluster_types.go
index 5332382192..aa909c08b3 100644
--- a/exp/api/v1beta2/rosacluster_types.go
+++ b/exp/api/v1beta2/rosacluster_types.go
@@ -19,7 +19,9 @@ package v1beta2
 
 import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1")
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+)
 
 // ROSAClusterSpec defines the desired state of ROSACluster.
 type ROSAClusterSpec struct {
@@ -36,11 +38,11 @@ type ROSAClusterStatus struct {
 
 	// FailureDomains specifies a list of available availability zones that can be used
 	// +optional
-	FailureDomains clusterv1.FailureDomains `json:"failureDomains,omitempty"`
+	FailureDomains map[string]clusterv1.FailureDomain `json:"failureDomains,omitempty"`
 
 	// Conditions defines current service state of the ROSACluster.
 	// +optional
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 }
 
 // +kubebuilder:object:root=true
@@ -71,13 +73,13 @@ type ROSAClusterList struct {
 }
 
 // GetConditions returns the observations of the operational state of the
 // ROSACluster resource.
-func (r *ROSACluster) GetConditions() clusterv1.Conditions {
+func (r *ROSACluster) GetConditions() clusterv1beta1.Conditions {
 	return r.Status.Conditions
 }
 
 // SetConditions sets the underlying service state of the ROSACluster to the
-// predescribed clusterv1.Conditions.
-func (r *ROSACluster) SetConditions(conditions clusterv1.Conditions) {
+// predescribed clusterv1beta1.Conditions.
+func (r *ROSACluster) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta2/rosamachinepool_types.go b/exp/api/v1beta2/rosamachinepool_types.go index b57ff7ea3b..9c379586c6 100644 --- a/exp/api/v1beta2/rosamachinepool_types.go +++ b/exp/api/v1beta2/rosamachinepool_types.go @@ -23,7 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // RosaMachinePoolSpec defines the desired state of RosaMachinePool. @@ -206,7 +206,7 @@ type RosaMachinePoolStatus struct { Replicas int32 `json:"replicas"` // Conditions defines current service state of the managed machine pool // +optional - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` // FailureMessage will be set in the event that there is a terminal problem // reconciling the state and will be set to a descriptive error message. // @@ -252,12 +252,12 @@ type ROSAMachinePoolList struct { } // GetConditions returns the observations of the operational state of the RosaMachinePool resource. -func (r *ROSAMachinePool) GetConditions() clusterv1.Conditions { +func (r *ROSAMachinePool) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the RosaMachinePool to the predescribed clusterv1.Conditions. -func (r *ROSAMachinePool) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the RosaMachinePool to the predescribed clusterv1beta1.Conditions. +func (r *ROSAMachinePool) SetConditions(conditions clusterv1beta1.Conditions) { r.Status.Conditions = conditions } diff --git a/exp/api/v1beta2/rosanetwork_types.go b/exp/api/v1beta2/rosanetwork_types.go index e1228bf2a5..389292bdf1 100644 --- a/exp/api/v1beta2/rosanetwork_types.go +++ b/exp/api/v1beta2/rosanetwork_types.go @@ -20,7 +20,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" ) // ROSANetworkFinalizer allows the controller to clean up resources on delete. @@ -107,7 +107,7 @@ type ROSANetworkStatus struct { Resources []CFResource `json:"resources,omitempty"` // Conditions specifies the conditions for ROSANetwork - Conditions clusterv1.Conditions `json:"conditions,omitempty"` + Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"` } // +kubebuilder:object:root=true @@ -134,12 +134,12 @@ type ROSANetworkList struct { } // GetConditions returns the observations of the operational state of the ROSANetwork resource. -func (r *ROSANetwork) GetConditions() clusterv1.Conditions { +func (r *ROSANetwork) GetConditions() clusterv1beta1.Conditions { return r.Status.Conditions } -// SetConditions sets the underlying service state of the ROSANetwork to the predescribed clusterv1.Conditions. -func (r *ROSANetwork) SetConditions(conditions clusterv1.Conditions) { +// SetConditions sets the underlying service state of the ROSANetwork to the predescribed clusterv1beta1.Conditions. 
+func (r *ROSANetwork) SetConditions(conditions clusterv1beta1.Conditions) {
 	r.Status.Conditions = conditions
 }
diff --git a/exp/api/v1beta2/rosaroleconfig_types.go b/exp/api/v1beta2/rosaroleconfig_types.go
index 05056e5887..41e0c21add 100644
--- a/exp/api/v1beta2/rosaroleconfig_types.go
+++ b/exp/api/v1beta2/rosaroleconfig_types.go
@@ -22,7 +22,7 @@ import (
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 )
 
 // OidcProviderType set to Managed or UnManaged
@@ -191,7 +191,7 @@ type ROSARoleConfigStatus struct {
 	OperatorRolesRef rosacontrolplanev1.AWSRolesRef `json:"operatorRolesRef,omitempty"`
 
 	// Conditions specifies the ROSARoleConfig conditions
-	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
 }
 
 // AccountRolesRef describes ARNs used as Account roles.
@@ -229,12 +229,12 @@ type ROSARoleConfigList struct {
 }
 
 // SetConditions sets the conditions of the ROSARoleConfig.
-func (r *ROSARoleConfig) SetConditions(conditions clusterv1.Conditions) {
+func (r *ROSARoleConfig) SetConditions(conditions clusterv1beta1.Conditions) {
 	r.Status.Conditions = conditions
 }
 
 // GetConditions returns the observations of the operational state of the ROSARoleConfig resource.
-func (r *ROSARoleConfig) GetConditions() clusterv1.Conditions {
+func (r *ROSARoleConfig) GetConditions() clusterv1beta1.Conditions {
 	return r.Status.Conditions
 }
diff --git a/exp/api/v1beta2/zz_generated.deepcopy.go b/exp/api/v1beta2/zz_generated.deepcopy.go
index d2fe08ef4d..094bc9a896 100644
--- a/exp/api/v1beta2/zz_generated.deepcopy.go
+++ b/exp/api/v1beta2/zz_generated.deepcopy.go
@@ -28,6 +28,7 @@ import (
 	apiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	rosaapiv1beta2 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
 	"sigs.k8s.io/cluster-api/api/core/v1beta1"
+	corev1beta2 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -1110,7 +1111,7 @@ func (in *ROSAClusterStatus) DeepCopyInto(out *ROSAClusterStatus) {
 	*out = *in
 	if in.FailureDomains != nil {
 		in, out := &in.FailureDomains, &out.FailureDomains
-		*out = make(v1beta1.FailureDomains, len(*in))
+		*out = make(map[string]corev1beta2.FailureDomain, len(*in))
 		for key, val := range *in {
 			(*out)[key] = *val.DeepCopy()
 		}
diff --git a/exp/controlleridentitycreator/suite_test.go b/exp/controlleridentitycreator/suite_test.go
index 20bcc1a66c..d10dac2ec2 100644
--- a/exp/controlleridentitycreator/suite_test.go
+++ b/exp/controlleridentitycreator/suite_test.go
@@ -30,7 +30,8 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1")
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+)
 
 // These tests use Ginkgo (BDD-style Go testing framework). Refer to
 // http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
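Reviewer note on the pattern running through the API files above: each resource keeps Status.Conditions typed as core v1beta1 Conditions and exposes it through a GetConditions/SetConditions pair, which is what lets the relocated helpers in sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions (imported by the controller hunks below) keep operating on these objects. The following is a minimal, self-contained sketch of that contract, assuming the CAPI v1.11 package layout; Widget, WidgetStatus, WidgetReadyCondition, and markReadiness are hypothetical names for illustration and are not part of this patch:

    package example

    import (
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/apimachinery/pkg/runtime"

    	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
    	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
    )

    // WidgetReadyCondition is a hypothetical condition type, declared the same
    // way as the expinfrav1beta1 constants in conditions_consts.go above.
    const WidgetReadyCondition clusterv1beta1.ConditionType = "WidgetReady"

    // Widget is a hypothetical API object carrying v1beta1-style conditions.
    type Widget struct {
    	metav1.TypeMeta   `json:",inline"`
    	metav1.ObjectMeta `json:"metadata,omitempty"`
    	Status            WidgetStatus `json:"status,omitempty"`
    }

    // WidgetStatus mirrors the Status.Conditions shape used by the *_types.go
    // files in this patch.
    type WidgetStatus struct {
    	Conditions clusterv1beta1.Conditions `json:"conditions,omitempty"`
    }

    // GetConditions and SetConditions satisfy the Getter/Setter interfaces
    // that the deprecated conditions helpers operate on.
    func (w *Widget) GetConditions() clusterv1beta1.Conditions { return w.Status.Conditions }

    func (w *Widget) SetConditions(c clusterv1beta1.Conditions) { w.Status.Conditions = c }

    // DeepCopyObject makes *Widget a runtime.Object, which those helpers also
    // require; real types get this from controller-gen, and this hand-rolled
    // version only deep-copies the conditions slice.
    func (w *Widget) DeepCopyObject() runtime.Object {
    	out := new(Widget)
    	*out = *w
    	out.Status.Conditions = make(clusterv1beta1.Conditions, len(w.Status.Conditions))
    	copy(out.Status.Conditions, w.Status.Conditions)
    	return out
    }

    // markReadiness mirrors the MarkTrue/MarkFalse call sites rewritten in the
    // controller hunks below.
    func markReadiness(w *Widget, ready bool) {
    	if ready {
    		conditions.MarkTrue(w, WidgetReadyCondition)
    		return
    	}
    	conditions.MarkFalse(w, WidgetReadyCondition, "Provisioning",
    		clusterv1beta1.ConditionSeverityInfo, "waiting for infrastructure")
    }

The severity argument to MarkFalse is specific to the v1beta1 condition shape; the v1beta2 condition API drops it, which appears to be why these call sites pin the deprecated package rather than migrating to the new helpers in the same change.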
diff --git a/exp/controllers/awsfargatepool_controller.go b/exp/controllers/awsfargatepool_controller.go index ad6c6c20a4..1f98399741 100644 --- a/exp/controllers/awsfargatepool_controller.go +++ b/exp/controllers/awsfargatepool_controller.go @@ -31,13 +31,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -112,9 +114,9 @@ func (r *AWSFargateProfileReconciler) Reconcile(ctx context.Context, req ctrl.Re } defer func() { - applicableConditions := []clusterv1.ConditionType{ - expinfrav1.IAMFargateRolesReadyCondition, - expinfrav1.EKSFargateProfileReadyCondition, + applicableConditions := []clusterv1beta1.ConditionType{ + expinfrav1beta1.IAMFargateRolesReadyCondition, + expinfrav1beta1.EKSFargateProfileReadyCondition, } conditions.SetSummary(fargateProfileScope.FargateProfile, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter()) @@ -126,7 +128,7 @@ func (r *AWSFargateProfileReconciler) Reconcile(ctx context.Context, req ctrl.Re if !controlPlane.Status.Ready { log.Info("Control plane is not ready yet") - conditions.MarkFalse(fargateProfile, clusterv1.ReadyCondition, expinfrav1.WaitingForEKSControlPlaneReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(fargateProfile, clusterv1.ReadyCondition, expinfrav1beta1.WaitingForEKSControlPlaneReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } diff --git a/exp/controllers/awsmachinepool_controller.go b/exp/controllers/awsmachinepool_controller.go index 4dec26b89c..ef40e218cf 100644 --- a/exp/controllers/awsmachinepool_controller.go +++ b/exp/controllers/awsmachinepool_controller.go @@ -41,9 +41,11 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "sigs.k8s.io/controller-runtime/pkg/reconcile" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/controllers" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" @@ -53,10 +55,11 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + 
"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -183,12 +186,12 @@ func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Reque // set Ready condition before AWSMachinePool is patched conditions.SetSummary(machinePoolScope.AWSMachinePool, conditions.WithConditions( - expinfrav1.ASGReadyCondition, - expinfrav1.LaunchTemplateReadyCondition, + expinfrav1beta1.ASGReadyCondition, + expinfrav1beta1.LaunchTemplateReadyCondition, ), conditions.WithStepCounterIfOnly( - expinfrav1.ASGReadyCondition, - expinfrav1.LaunchTemplateReadyCondition, + expinfrav1beta1.ASGReadyCondition, + expinfrav1beta1.LaunchTemplateReadyCondition, ), ) @@ -277,16 +280,16 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP } } - if !machinePoolScope.Cluster.Status.InfrastructureReady { + if !*machinePoolScope.Cluster.Status.Initialization.InfrastructureProvisioned { machinePoolScope.Info("Cluster infrastructure is not ready yet") - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, infrav1beta1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } // Make sure bootstrap data is available and populated if machinePoolScope.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil { machinePoolScope.Info("Bootstrap data secret reference is not yet available") - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, infrav1beta1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -298,7 +301,7 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP // Find existing ASG asg, err := r.findASG(machinePoolScope, asgsvc) if err != nil { - conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGNotFoundReason, "%s", err.Error()) + conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, expinfrav1beta1.ASGNotFoundReason, "%s", err.Error()) return ctrl.Result{}, err } @@ -351,12 +354,12 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP } // set the LaunchTemplateReady condition - conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.LaunchTemplateReadyCondition) + conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1beta1.LaunchTemplateReadyCondition) if asg == nil { // Create new ASG if err := r.createPool(machinePoolScope, clusterScope); err != nil { - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGProvisionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error()) + conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, expinfrav1beta1.ASGProvisionFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return ctrl.Result{}, err } return ctrl.Result{ @@ -372,13 +375,13 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP if err := createAWSMachinesIfNotExists(ctx, awsMachineList, machinePoolScope.MachinePool, 
&machinePoolScope.AWSMachinePool.ObjectMeta, &machinePoolScope.AWSMachinePool.TypeMeta, asg, machinePoolScope.GetLogger(), r.Client, ec2Svc); err != nil { machinePoolScope.SetNotReady() - conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineCreationFailed, clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineCreationFailed, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to create awsmachines: %w", err) } if err := deleteOrphanedAWSMachines(ctx, awsMachineList, asg, machinePoolScope.GetLogger(), r.Client); err != nil { machinePoolScope.SetNotReady() - conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineDeletionFailed, clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineDeletionFailed, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to clean up awsmachines: %w", err) } } @@ -436,7 +439,7 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP machinePoolScope.AWSMachinePool.Spec.ProviderIDList = providerIDList machinePoolScope.AWSMachinePool.Status.Replicas = int32(len(providerIDList)) //#nosec G115 machinePoolScope.AWSMachinePool.Status.Ready = true - conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition) + conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition) err = machinePoolScope.UpdateInstanceStatuses(ctx, asg.Instances) if err != nil { @@ -475,14 +478,14 @@ func (r *AWSMachinePoolReconciler) reconcileDelete(ctx context.Context, machineP if asg == nil { machinePoolScope.Warn("Unable to locate ASG") - r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, expinfrav1.ASGNotFoundReason, "Unable to find matching ASG") + r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, expinfrav1beta1.ASGNotFoundReason, "Unable to find matching ASG") } else { machinePoolScope.SetASGStatus(asg.Status) switch asg.Status { case expinfrav1.ASGStatusDeleteInProgress: // ASG is already deleting machinePoolScope.SetNotReady() - conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1.ASGReadyCondition, expinfrav1.ASGDeletionInProgress, clusterv1.ConditionSeverityWarning, "") + conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, expinfrav1.ASGDeletionInProgress, clusterv1beta1.ConditionSeverityWarning, "") r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "DeletionInProgress", "ASG deletion in progress: %q", asg.Name) machinePoolScope.Info("ASG is already deleting", "name", asg.Name) default: @@ -502,7 +505,7 @@ func (r *AWSMachinePoolReconciler) reconcileDelete(ctx context.Context, machineP if launchTemplate == nil { machinePoolScope.Debug("Unable to locate launch template") - r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, expinfrav1.ASGNotFoundReason, "Unable to find matching ASG") + r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeNormal, expinfrav1beta1.ASGNotFoundReason, "Unable to find matching ASG") controllerutil.RemoveFinalizer(machinePoolScope.AWSMachinePool, expinfrav1.MachinePoolFinalizer) return nil } @@ -697,7 +700,7 @@ func 
machinePoolToInfrastructureMapFunc(gvk schema.GroupVersionKind) handler.Map gk := gvk.GroupKind() // Return early if the GroupKind doesn't match what we expect - infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupVersionKind().GroupKind() + infraGK := m.Spec.Template.Spec.InfrastructureRef.GroupKind() if gk != infraGK { return nil } @@ -725,7 +728,7 @@ func (r *AWSMachinePoolReconciler) getInfraCluster(ctx context.Context, log *log var managedControlPlaneScope *scope.ManagedControlPlaneScope var err error - if cluster.Spec.ControlPlaneRef != nil && cluster.Spec.ControlPlaneRef.Kind == controllers.AWSManagedControlPlaneRefKind { + if cluster.Spec.ControlPlaneRef.IsDefined() && cluster.Spec.ControlPlaneRef.Kind == controllers.AWSManagedControlPlaneRefKind { controlPlane := &ekscontrolplanev1.AWSManagedControlPlane{} controlPlaneName := client.ObjectKey{ Namespace: awsMachinePool.Namespace, diff --git a/exp/controllers/awsmachinepool_controller_test.go b/exp/controllers/awsmachinepool_controller_test.go index 694e3cd6dc..11d183dcf8 100644 --- a/exp/controllers/awsmachinepool_controller_test.go +++ b/exp/controllers/awsmachinepool_controller_test.go @@ -43,7 +43,9 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/fake" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/feature" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" @@ -55,8 +57,9 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/labels/format" "sigs.k8s.io/cluster-api/util/patch" ) @@ -146,7 +149,9 @@ func TestAWSMachinePoolReconciler(t *testing.T) { Client: testEnv.Client, Cluster: &clusterv1.Cluster{ Status: clusterv1.ClusterStatus{ - InfrastructureReady: true, + Initialization: clusterv1.ClusterInitializationStatus{ + InfrastructureProvisioned: ptr.To(true), + }, }, }, MachinePool: &clusterv1.MachinePool{ @@ -262,7 +267,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { defer teardown(t, g) getASG(t, g) - ms.Cluster.Status.InfrastructureReady = false + ms.Cluster.Status.Initialization.InfrastructureProvisioned = ptr.To(false) buf := new(bytes.Buffer) klog.SetOutput(buf) @@ -270,7 +275,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) { _, err := reconciler.reconcileNormal(context.Background(), ms, cs, cs, cs) g.Expect(err).To(BeNil()) g.Expect(buf.String()).To(ContainSubstring("Cluster infrastructure is not ready yet")) - expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForClusterInfrastructureReason}}) + expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1beta1.ASGReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, infrav1beta1.WaitingForClusterInfrastructureReason}}) }) t.Run("should exit 
immediately if bootstrap data secret reference isn't available", func(t *testing.T) {
 			g := NewWithT(t)
@@ -286,7 +291,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
 			g.Expect(err).To(BeNil())
 			g.Expect(buf.String()).To(ContainSubstring("Bootstrap data secret reference is not yet available"))
-			expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1.ASGReadyCondition, corev1.ConditionFalse, clusterv1.ConditionSeverityInfo, infrav1.WaitingForBootstrapDataReason}})
+			expectConditions(g, ms.AWSMachinePool, []conditionAssertion{{expinfrav1beta1.ASGReadyCondition, corev1.ConditionFalse, clusterv1beta1.ConditionSeverityInfo, infrav1beta1.WaitingForBootstrapDataReason}})
 		})
 	})
 	t.Run("there's a provider ID", func(t *testing.T) {
@@ -1165,7 +1170,7 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
 			g.Expect(err).To(BeNil())
 			g.Expect(buf.String()).To(ContainSubstring("Unable to locate ASG"))
 			g.Expect(ms.AWSMachinePool.Finalizers).To(ConsistOf(metav1.FinalizerDeleteDependents))
-			g.Eventually(recorder.Events).Should(Receive(ContainSubstring(expinfrav1.ASGNotFoundReason)))
+			g.Eventually(recorder.Events).Should(Receive(ContainSubstring(expinfrav1beta1.ASGNotFoundReason)))
 		})
 		t.Run("should cause AWSMachinePool to go into NotReady", func(t *testing.T) {
 			g := NewWithT(t)
@@ -1378,9 +1383,9 @@ func TestAWSMachinePoolReconciler(t *testing.T) {
 }
 
 type conditionAssertion struct {
-	conditionType clusterv1.ConditionType
+	conditionType clusterv1beta1.ConditionType
 	status        corev1.ConditionStatus
-	severity      clusterv1.ConditionSeverity
+	severity      clusterv1beta1.ConditionSeverity
 	reason        string
 }
 
@@ -1476,7 +1481,7 @@ func TestDiffASG(t *testing.T) {
 			args: args{
 				machinePoolScope: &scope.MachinePoolScope{
 					MachinePool: &clusterv1.MachinePool{
-						Spec: clusterv1.MachinePoolSpec{
+						Spec: clusterv1.MachinePoolSpec{
 							Replicas: ptr.To[int32](1),
 						},
 					},
diff --git a/exp/controllers/awsmachinepool_machines.go b/exp/controllers/awsmachinepool_machines.go
index 0b27ee5a09..b38789d154 100644
--- a/exp/controllers/awsmachinepool_machines.go
+++ b/exp/controllers/awsmachinepool_machines.go
@@ -17,7 +17,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/feature"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ec2"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/labels/format"
 )
diff --git a/exp/controllers/awsmanagedmachinepool_controller.go b/exp/controllers/awsmanagedmachinepool_controller.go
index 62bdc8b02f..65302fac4f 100644
--- a/exp/controllers/awsmanagedmachinepool_controller.go
+++ b/exp/controllers/awsmanagedmachinepool_controller.go
@@ -37,6 +37,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
+	expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
@@ -44,9 +45,10 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/util/paused"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1
"sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -151,7 +153,7 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr if !controlPlane.Status.Ready { log.Info("Control plane is not ready yet") - conditions.MarkFalse(awsPool, expinfrav1.EKSNodegroupReadyCondition, expinfrav1.WaitingForEKSControlPlaneReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(awsPool, expinfrav1beta1.EKSNodegroupReadyCondition, expinfrav1beta1.WaitingForEKSControlPlaneReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -173,10 +175,10 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr } defer func() { - applicableConditions := []clusterv1.ConditionType{ - expinfrav1.EKSNodegroupReadyCondition, - expinfrav1.IAMNodegroupRolesReadyCondition, - expinfrav1.LaunchTemplateReadyCondition, + applicableConditions := []clusterv1beta1.ConditionType{ + expinfrav1beta1.EKSNodegroupReadyCondition, + expinfrav1beta1.IAMNodegroupRolesReadyCondition, + expinfrav1beta1.LaunchTemplateReadyCondition, } conditions.SetSummary(machinePoolScope.ManagedMachinePool, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter()) @@ -226,7 +228,7 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal( if err != nil { r.Recorder.Eventf(machinePoolScope.ManagedMachinePool, corev1.EventTypeWarning, "FailedLaunchTemplateReconcile", "Failed to reconcile launch template: %v", err) machinePoolScope.Error(err, "failed to reconcile launch template") - conditions.MarkFalse(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "") + conditions.MarkFalse(machinePoolScope.ManagedMachinePool, expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "") return ctrl.Result{}, err } if res != nil { @@ -243,7 +245,7 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal( } // set the LaunchTemplateReady condition - conditions.MarkTrue(machinePoolScope.ManagedMachinePool, expinfrav1.LaunchTemplateReadyCondition) + conditions.MarkTrue(machinePoolScope.ManagedMachinePool, expinfrav1beta1.LaunchTemplateReadyCondition) } if err := ekssvc.ReconcilePool(ctx); err != nil { diff --git a/exp/controllers/rosamachinepool_controller.go b/exp/controllers/rosamachinepool_controller.go index 00388ffc3d..464222166a 100644 --- a/exp/controllers/rosamachinepool_controller.go +++ b/exp/controllers/rosamachinepool_controller.go @@ -29,6 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/exp/utils" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" @@ -37,10 +38,11 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/util/paused" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" 
"sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -137,8 +139,9 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ } controllerutil.RemoveFinalizer(rosaMachinePool, expinfrav1.RosaMachinePoolFinalizer) - return ctrl.Result{}, patchHelper.Patch(ctx, rosaMachinePool, patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - expinfrav1.RosaMachinePoolReadyCondition}}) + return ctrl.Result{}, patchHelper.Patch(ctx, rosaMachinePool, patch.WithOwnedConditions{Conditions: []string{ + string(expinfrav1beta1.RosaMachinePoolReadyCondition), + }}) } log.Info("Failed to retrieve ControlPlane from MachinePool") @@ -171,12 +174,12 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ if !controlPlane.Status.Ready && controlPlane.ObjectMeta.DeletionTimestamp.IsZero() { log.Info("Control plane is not ready yet") - err := machinePoolScope.RosaMachinePoolReadyFalse(expinfrav1.WaitingForRosaControlPlaneReason, "") + err := machinePoolScope.RosaMachinePoolReadyFalse(expinfrav1beta1.WaitingForRosaControlPlaneReason, "") return ctrl.Result{}, err } defer func() { - conditions.SetSummary(machinePoolScope.RosaMachinePool, conditions.WithConditions(expinfrav1.RosaMachinePoolReadyCondition), conditions.WithStepCounter()) + conditions.SetSummary(machinePoolScope.RosaMachinePool, conditions.WithConditions(expinfrav1beta1.RosaMachinePoolReadyCondition), conditions.WithStepCounter()) if err := machinePoolScope.Close(); err != nil && reterr == nil { reterr = err @@ -268,7 +271,7 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, rosaMachinePool.Status.Replicas = currentReplicas if rosa.IsNodePoolReady(nodePool) { - conditions.MarkTrue(rosaMachinePool, expinfrav1.RosaMachinePoolReadyCondition) + conditions.MarkTrue(rosaMachinePool, expinfrav1beta1.RosaMachinePoolReadyCondition) rosaMachinePool.Status.Ready = true if err := r.reconcileMachinePoolVersion(machinePoolScope, ocmClient, nodePool); err != nil { @@ -279,9 +282,9 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, } conditions.MarkFalse(rosaMachinePool, - expinfrav1.RosaMachinePoolReadyCondition, + expinfrav1beta1.RosaMachinePoolReadyCondition, nodePool.Status().Message(), - clusterv1.ConditionSeverityInfo, + clusterv1beta1.ConditionSeverityInfo, "") machinePoolScope.Info("waiting for NodePool to become ready", "state", nodePool.Status().Message()) @@ -298,9 +301,9 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context, nodePool, err = ocmClient.CreateNodePool(machinePoolScope.ControlPlane.Status.ID, nodePoolSpec) if err != nil { conditions.MarkFalse(rosaMachinePool, - expinfrav1.RosaMachinePoolReadyCondition, - expinfrav1.RosaMachinePoolReconciliationFailedReason, - clusterv1.ConditionSeverityError, + expinfrav1beta1.RosaMachinePoolReadyCondition, + expinfrav1beta1.RosaMachinePoolReconciliationFailedReason, + clusterv1beta1.ConditionSeverityError, "failed to create ROSAMachinePool: %s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to create nodepool: %w", err) } @@ -341,7 +344,7 @@ func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope version := machinePoolScope.RosaMachinePool.Spec.Version if version == "" || version == rosa.RawVersionID(nodePool.Version()) { 
machinePoolScope.RosaMachinePool.Status.AvailableUpgrades = nodePool.Version().AvailableUpgrades() - conditions.MarkFalse(machinePoolScope.RosaMachinePool, expinfrav1.RosaMachinePoolUpgradingCondition, "upgraded", clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(machinePoolScope.RosaMachinePool, expinfrav1beta1.RosaMachinePoolUpgradingCondition, "upgraded", clusterv1beta1.ConditionSeverityInfo, "") return nil } @@ -358,8 +361,8 @@ func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope } } - condition := &clusterv1.Condition{ - Type: expinfrav1.RosaMachinePoolUpgradingCondition, + condition := &clusterv1beta1.Condition{ + Type: expinfrav1beta1.RosaMachinePoolUpgradingCondition, Status: corev1.ConditionTrue, Reason: string(scheduledUpgrade.State().Value()), Message: fmt.Sprintf("Upgrading to version %s", scheduledUpgrade.Version()), @@ -410,9 +413,9 @@ func (r *ROSAMachinePoolReconciler) updateNodePool(machinePoolScope *scope.RosaM updatedNodePool, err := ocmClient.UpdateNodePool(machinePoolScope.ControlPlane.Status.ID, nodePoolSpec) if err != nil { conditions.MarkFalse(machinePoolScope.RosaMachinePool, - expinfrav1.RosaMachinePoolReadyCondition, - expinfrav1.RosaMachinePoolReconciliationFailedReason, - clusterv1.ConditionSeverityError, + expinfrav1beta1.RosaMachinePoolReadyCondition, + expinfrav1beta1.RosaMachinePoolReconciliationFailedReason, + clusterv1beta1.ConditionSeverityError, "failed to update ROSAMachinePool: %s", err.Error()) return nil, fmt.Errorf("failed to update nodePool: %w", err) } diff --git a/exp/controllers/rosamachinepool_controller_test.go b/exp/controllers/rosamachinepool_controller_test.go index 3ac08cb087..ffb744e083 100644 --- a/exp/controllers/rosamachinepool_controller_test.go +++ b/exp/controllers/rosamachinepool_controller_test.go @@ -30,7 +30,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/patch" ) @@ -113,7 +114,8 @@ func TestRosaMachinePoolReconcile(t *testing.T) { return &rosacontrolplanev1.ROSAControlPlane{ ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("rosa-control-plane-%v", i), - Namespace: ns.Name}, + Namespace: ns.Name, + }, TypeMeta: metav1.TypeMeta{ Kind: "ROSAControlPlane", APIVersion: rosacontrolplanev1.GroupVersion.String(), @@ -168,10 +170,10 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Namespace: ns.Name, }, Spec: clusterv1.ClusterSpec{ - ControlPlaneRef: &corev1.ObjectReference{ - Name: rosaControlPlane(i).Name, - Kind: "ROSAControlPlane", - APIVersion: rosacontrolplanev1.GroupVersion.String(), + ControlPlaneRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaControlPlane(i).Name, + Kind: "ROSAControlPlane", + APIGroup: rosacontrolplanev1.GroupVersion.Group, }, }, } @@ -214,12 +216,10 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: fmt.Sprintf("owner-cluster-%v", i), - InfrastructureRef: corev1.ObjectReference{ - UID: rosaMachinePool(i).UID, - Name: rosaMachinePool(i).Name, - Namespace: ns.Namespace, - Kind: "ROSAMachinePool", - APIVersion: clusterv1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaMachinePool(i).Name, + Kind: 
"ROSAMachinePool", + APIGroup: clusterv1.GroupVersion.Group, }, }, }, @@ -376,12 +376,10 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: ownerCluster(3).Name, - InfrastructureRef: corev1.ObjectReference{ - UID: rosaMachinePool(3).UID, - Name: rosaMachinePool(3).Name, - Namespace: ns.Namespace, - Kind: "ROSAMachinePool", - APIVersion: clusterv1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaMachinePool(3).Name, + Kind: "ROSAMachinePool", + APIGroup: clusterv1.GroupVersion.Group, }, }, }, @@ -438,12 +436,10 @@ func TestRosaMachinePoolReconcile(t *testing.T) { Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: ownerCluster(4).Name, - InfrastructureRef: corev1.ObjectReference{ - UID: rosaMachinePool(4).UID, - Name: rosaMachinePool(4).Name, - Namespace: ns.Namespace, - Kind: "ROSAMachinePool", - APIVersion: clusterv1.GroupVersion.String(), + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Name: rosaMachinePool(4).Name, + Kind: "ROSAMachinePool", + APIGroup: clusterv1.GroupVersion.Group, }, }, }, @@ -535,7 +531,7 @@ func TestRosaMachinePoolReconcile(t *testing.T) { // patch status conditions rmpPh, err := patch.NewHelper(test.oldROSAMachinePool, testEnv) - test.oldROSAMachinePool.Status.Conditions = clusterv1.Conditions{ + test.oldROSAMachinePool.Status.Conditions = clusterv1beta1.Conditions{ { Type: "Paused", Status: corev1.ConditionFalse, diff --git a/exp/controllers/rosanetwork_controller.go b/exp/controllers/rosanetwork_controller.go index 2859eaecb3..8d6c5824ff 100644 --- a/exp/controllers/rosanetwork_controller.go +++ b/exp/controllers/rosanetwork_controller.go @@ -40,11 +40,12 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -151,17 +152,17 @@ func (r *ROSANetworkReconciler) reconcileNormal(ctx context.Context, rosaNetScop _, err := r.awsClient.CreateStackWithParamsTags(ctx, templateBody, rosaNetScope.ROSANetwork.Spec.StackName, cfParams, rosaNetScope.ROSANetwork.Spec.StackTags) if err != nil { conditions.MarkFalse(rosaNetScope.ROSANetwork, - expinfrav1.ROSANetworkReadyCondition, - expinfrav1.ROSANetworkFailedReason, - clusterv1.ConditionSeverityError, + expinfrav1beta1.ROSANetworkReadyCondition, + expinfrav1beta1.ROSANetworkFailedReason, + clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to start CF stack creation: %w", err) } conditions.MarkFalse(rosaNetScope.ROSANetwork, - expinfrav1.ROSANetworkReadyCondition, - expinfrav1.ROSANetworkCreatingReason, - clusterv1.ConditionSeverityInfo, + expinfrav1beta1.ROSANetworkReadyCondition, + expinfrav1beta1.ROSANetworkCreatingReason, + clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } @@ -175,9 +176,9 @@ func (r *ROSANetworkReconciler) reconcileNormal(ctx context.Context, rosaNetScop case 
cloudformationtypes.StackStatusCreateInProgress: // Create in progress // Set the reason of false ROSANetworkReadyCondition to Creating conditions.MarkFalse(rosaNetScope.ROSANetwork, - expinfrav1.ROSANetworkReadyCondition, - expinfrav1.ROSANetworkCreatingReason, - clusterv1.ConditionSeverityInfo, + expinfrav1beta1.ROSANetworkReadyCondition, + expinfrav1beta1.ROSANetworkCreatingReason, + clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{RequeueAfter: time.Second * 60}, nil case cloudformationtypes.StackStatusCreateComplete: // Create complete @@ -188,19 +189,19 @@ func (r *ROSANetworkReconciler) reconcileNormal(ctx context.Context, rosaNetScop // Set the reason of true ROSANetworkReadyCondition to Created // We have to use conditions.Set(), since conditions.MarkTrue() does not support setting reason conditions.Set(rosaNetScope.ROSANetwork, - &clusterv1.Condition{ - Type: expinfrav1.ROSANetworkReadyCondition, + &clusterv1beta1.Condition{ + Type: expinfrav1beta1.ROSANetworkReadyCondition, Status: corev1.ConditionTrue, - Reason: expinfrav1.ROSANetworkCreatedReason, - Severity: clusterv1.ConditionSeverityInfo, + Reason: expinfrav1beta1.ROSANetworkCreatedReason, + Severity: clusterv1beta1.ConditionSeverityInfo, }) return ctrl.Result{}, nil case cloudformationtypes.StackStatusCreateFailed: // Create failed // Set the reason of false ROSANetworkReadyCondition to Failed conditions.MarkFalse(rosaNetScope.ROSANetwork, - expinfrav1.ROSANetworkReadyCondition, - expinfrav1.ROSANetworkFailedReason, - clusterv1.ConditionSeverityError, + expinfrav1beta1.ROSANetworkReadyCondition, + expinfrav1beta1.ROSANetworkFailedReason, + clusterv1beta1.ConditionSeverityError, "") return ctrl.Result{}, fmt.Errorf("cloudformation stack %s creation failed, see the stack resources for more information", *r.cfStack.StackName) } @@ -222,26 +223,26 @@ func (r *ROSANetworkReconciler) reconcileDelete(ctx context.Context, rosaNetScop return ctrl.Result{RequeueAfter: time.Second * 60}, nil case cloudformationtypes.StackStatusDeleteFailed: // Deletion failed conditions.MarkFalse(rosaNetScope.ROSANetwork, - expinfrav1.ROSANetworkReadyCondition, - expinfrav1.ROSANetworkDeletionFailedReason, - clusterv1.ConditionSeverityError, + expinfrav1beta1.ROSANetworkReadyCondition, + expinfrav1beta1.ROSANetworkDeletionFailedReason, + clusterv1beta1.ConditionSeverityError, "") return ctrl.Result{}, fmt.Errorf("CF stack deletion failed") default: // All the other states err := r.awsClient.DeleteCFStack(ctx, rosaNetScope.ROSANetwork.Spec.StackName) if err != nil { conditions.MarkFalse(rosaNetScope.ROSANetwork, - expinfrav1.ROSANetworkReadyCondition, - expinfrav1.ROSANetworkDeletionFailedReason, - clusterv1.ConditionSeverityError, + expinfrav1beta1.ROSANetworkReadyCondition, + expinfrav1beta1.ROSANetworkDeletionFailedReason, + clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return ctrl.Result{}, fmt.Errorf("failed to start CF stack deletion: %w", err) } conditions.MarkFalse(rosaNetScope.ROSANetwork, - expinfrav1.ROSANetworkReadyCondition, - expinfrav1.ROSANetworkDeletingReason, - clusterv1.ConditionSeverityInfo, + expinfrav1beta1.ROSANetworkReadyCondition, + expinfrav1beta1.ROSANetworkDeletingReason, + clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{RequeueAfter: time.Second * 60}, nil } diff --git a/exp/controllers/rosanetwork_controller_test.go b/exp/controllers/rosanetwork_controller_test.go index 284fc00b94..8384ad170f 100644 --- a/exp/controllers/rosanetwork_controller_test.go +++ 
b/exp/controllers/rosanetwork_controller_test.go @@ -38,9 +38,10 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func TestROSANetworkReconciler_Reconcile(t *testing.T) { @@ -66,7 +67,8 @@ func TestROSANetworkReconciler_Reconcile(t *testing.T) { rosaNetwork := &expinfrav1.ROSANetwork{ ObjectMeta: metav1.ObjectMeta{ Name: name, - Namespace: ns.Name}, + Namespace: ns.Name, + }, Spec: expinfrav1.ROSANetworkSpec{ StackName: name, CIDRBlock: "10.0.0.0/8", @@ -86,7 +88,8 @@ func TestROSANetworkReconciler_Reconcile(t *testing.T) { rosaNetworkDeleted := &expinfrav1.ROSANetwork{ ObjectMeta: metav1.ObjectMeta{ Name: nameDeleted, - Namespace: ns.Name}, + Namespace: ns.Name, + }, Spec: expinfrav1.ROSANetworkSpec{ StackName: nameDeleted, CIDRBlock: "10.0.0.0/8", @@ -156,8 +159,8 @@ func TestROSANetworkReconciler_Reconcile(t *testing.T) { cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetwork) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cnd).ToNot(BeNil()) - g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkFailedReason)) - g.Expect(cnd.Severity).To(Equal(clusterv1.ConditionSeverityError)) + g.Expect(cnd.Reason).To(Equal(expinfrav1beta1.ROSANetworkFailedReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityError)) g.Expect(cnd.Message).To(Equal("test-error")) }) @@ -187,8 +190,8 @@ func TestROSANetworkReconciler_Reconcile(t *testing.T) { cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetwork) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cnd).ToNot(BeNil()) - g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkCreatingReason)) - g.Expect(cnd.Severity).To(Equal(clusterv1.ConditionSeverityInfo)) + g.Expect(cnd.Reason).To(Equal(expinfrav1beta1.ROSANetworkCreatingReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) }) t.Run("CF stack creation is in progress", func(t *testing.T) { @@ -219,8 +222,8 @@ func TestROSANetworkReconciler_Reconcile(t *testing.T) { cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetwork) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cnd).ToNot(BeNil()) - g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkCreatingReason)) - g.Expect(cnd.Severity).To(Equal(clusterv1.ConditionSeverityInfo)) + g.Expect(cnd.Reason).To(Equal(expinfrav1beta1.ROSANetworkCreatingReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) }) t.Run("CF stack creation completed", func(t *testing.T) { @@ -251,8 +254,8 @@ func TestROSANetworkReconciler_Reconcile(t *testing.T) { cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetwork) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cnd).ToNot(BeNil()) - g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkCreatedReason)) - g.Expect(cnd.Severity).To(Equal(clusterv1.ConditionSeverityInfo)) + g.Expect(cnd.Reason).To(Equal(expinfrav1beta1.ROSANetworkCreatedReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) }) t.Run("CF stack creation failed", func(t *testing.T) { @@ -283,8 +286,8 @@ func TestROSANetworkReconciler_Reconcile(t *testing.T) { cnd, err := 
getROSANetworkReadyCondition(reconciler, rosaNetwork) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cnd).ToNot(BeNil()) - g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkFailedReason)) - g.Expect(cnd.Severity).To(Equal(clusterv1.ConditionSeverityError)) + g.Expect(cnd.Reason).To(Equal(expinfrav1beta1.ROSANetworkFailedReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityError)) }) t.Run("CF stack deletion start failed", func(t *testing.T) { @@ -317,8 +320,8 @@ func TestROSANetworkReconciler_Reconcile(t *testing.T) { cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetworkDeleted) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cnd).ToNot(BeNil()) - g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkDeletionFailedReason)) - g.Expect(cnd.Severity).To(Equal(clusterv1.ConditionSeverityError)) + g.Expect(cnd.Reason).To(Equal(expinfrav1beta1.ROSANetworkDeletionFailedReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityError)) }) t.Run("CF stack deletion start succeeded", func(t *testing.T) { @@ -351,8 +354,8 @@ func TestROSANetworkReconciler_Reconcile(t *testing.T) { cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetworkDeleted) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cnd).ToNot(BeNil()) - g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkDeletingReason)) - g.Expect(cnd.Severity).To(Equal(clusterv1.ConditionSeverityInfo)) + g.Expect(cnd.Reason).To(Equal(expinfrav1beta1.ROSANetworkDeletingReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityInfo)) }) t.Run("CF stack deletion in progress", func(t *testing.T) { @@ -414,8 +417,8 @@ func TestROSANetworkReconciler_Reconcile(t *testing.T) { cnd, err := getROSANetworkReadyCondition(reconciler, rosaNetworkDeleted) g.Expect(err).NotTo(HaveOccurred()) g.Expect(cnd).ToNot(BeNil()) - g.Expect(cnd.Reason).To(Equal(expinfrav1.ROSANetworkDeletionFailedReason)) - g.Expect(cnd.Severity).To(Equal(clusterv1.ConditionSeverityError)) + g.Expect(cnd.Reason).To(Equal(expinfrav1beta1.ROSANetworkDeletionFailedReason)) + g.Expect(cnd.Severity).To(Equal(clusterv1beta1.ConditionSeverityError)) }) cleanupObject(g, rosaNetwork) @@ -599,7 +602,8 @@ func mockDescribeStacksCall(mockCFClient *rosaMocks.MockCloudFormationApiClient, DescribeStacks(gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(func(_ context.Context, _ *cloudformation.DescribeStacksInput, - _ ...func(*cloudformation.Options)) (*cloudformation.DescribeStacksOutput, error) { + _ ...func(*cloudformation.Options), + ) (*cloudformation.DescribeStacksOutput, error) { return output, err }). Times(times) @@ -611,7 +615,8 @@ func mockCreateStackCall(mockCFClient *rosaMocks.MockCloudFormationApiClient, ou CreateStack(gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(func(_ context.Context, _ *cloudformation.CreateStackInput, - _ ...func(*cloudformation.Options)) (*cloudformation.CreateStackOutput, error) { + _ ...func(*cloudformation.Options), + ) (*cloudformation.CreateStackOutput, error) { return output, err }). Times(times) @@ -623,7 +628,8 @@ func mockDescribeStackResourcesCall(mockCFClient *rosaMocks.MockCloudFormationAp DescribeStackResources(gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(func(_ context.Context, _ *cloudformation.DescribeStackResourcesInput, - _ ...func(*cloudformation.Options)) (*cloudformation.DescribeStackResourcesOutput, error) { + _ ...func(*cloudformation.Options), + ) (*cloudformation.DescribeStackResourcesOutput, error) { return output, err }). 
Times(times) @@ -635,7 +641,8 @@ func mockDeleteStackCall(mockCFClient *rosaMocks.MockCloudFormationApiClient, ou DeleteStack(gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(func(_ context.Context, _ *cloudformation.DeleteStackInput, - _ ...func(*cloudformation.Options)) (*cloudformation.DeleteStackOutput, error) { + _ ...func(*cloudformation.Options), + ) (*cloudformation.DeleteStackOutput, error) { return output, err }). Times(times) @@ -647,7 +654,8 @@ func mockDescribeSubnetsCall(mockEc2Client *rosaMocks.MockEc2ApiClient, output * DescribeSubnets(gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(func(_ context.Context, _ *ec2.DescribeSubnetsInput, - _ ...func(*ec2.Options)) (*ec2.DescribeSubnetsOutput, error) { + _ ...func(*ec2.Options), + ) (*ec2.DescribeSubnetsOutput, error) { return output, err }). Times(times) @@ -681,12 +689,12 @@ func deleteROSANetwork(ctx context.Context, rosaNetwork *expinfrav1.ROSANetwork) return nil } -func getROSANetworkReadyCondition(reconciler *ROSANetworkReconciler, rosaNet *expinfrav1.ROSANetwork) (*clusterv1.Condition, error) { +func getROSANetworkReadyCondition(reconciler *ROSANetworkReconciler, rosaNet *expinfrav1.ROSANetwork) (*clusterv1beta1.Condition, error) { updatedROSANetwork := &expinfrav1.ROSANetwork{} if err := reconciler.Client.Get(ctx, client.ObjectKeyFromObject(rosaNet), updatedROSANetwork); err != nil { return nil, err } - return conditions.Get(updatedROSANetwork, expinfrav1.ROSANetworkReadyCondition), nil + return conditions.Get(updatedROSANetwork, expinfrav1beta1.ROSANetworkReadyCondition), nil } diff --git a/exp/controllers/rosaroleconfig_controller.go b/exp/controllers/rosaroleconfig_controller.go index b136306951..150642206c 100644 --- a/exp/controllers/rosaroleconfig_controller.go +++ b/exp/controllers/rosaroleconfig_controller.go @@ -45,14 +45,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" stsiface "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -103,14 +104,13 @@ func (r *ROSARoleConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque ControllerName: "rosaroleconfig", Logger: log, }) - if err != nil { return ctrl.Result{}, fmt.Errorf("failed to create rosaroleconfig scope: %w", err) } // Always close the scope and set summary condition defer func() { - conditions.SetSummary(scope.RosaRoleConfig, conditions.WithConditions(expinfrav1.RosaRoleConfigReadyCondition), conditions.WithStepCounter()) + conditions.SetSummary(scope.RosaRoleConfig, conditions.WithConditions(expinfrav1beta1.RosaRoleConfigReadyCondition), conditions.WithStepCounter()) if err := scope.PatchObject(); err != nil { reterr = errors.Join(reterr, err) } @@ -122,7 +122,7 @@ func (r *ROSARoleConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque if !roleConfig.DeletionTimestamp.IsZero() { 
scope.Info("Deleting ROSARoleConfig.") - conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionStarted, clusterv1.ConditionSeverityInfo, "Deletion of RosaRolesConfig started") + conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionStarted, clusterv1beta1.ConditionSeverityInfo, "Deletion of RosaRolesConfig started") err = r.reconcileDelete(scope) if err == nil { controllerutil.RemoveFinalizer(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigFinalizer) @@ -136,36 +136,36 @@ func (r *ROSARoleConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque } if err := r.reconcileAccountRoles(scope); err != nil { - conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigReconciliationFailedReason, clusterv1.ConditionSeverityError, "Account Roles failure: %v", err) + conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "Account Roles failure: %v", err) return ctrl.Result{}, fmt.Errorf("account Roles: %w", err) } if err := r.reconcileOIDC(scope); err != nil { - conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigReconciliationFailedReason, clusterv1.ConditionSeverityError, "OIDC Config/provider failure: %v", err) + conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "OIDC Config/provider failure: %v", err) return ctrl.Result{}, fmt.Errorf("oicd Config: %w", err) } if err := r.reconcileOperatorRoles(scope); err != nil { - conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigReconciliationFailedReason, clusterv1.ConditionSeverityError, "Operator Roles failure: %v", err) + conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "Operator Roles failure: %v", err) return ctrl.Result{}, fmt.Errorf("operator Roles: %w", err) } if r.rosaRolesConfigReady(scope.RosaRoleConfig) { conditions.Set(scope.RosaRoleConfig, - &clusterv1.Condition{ - Type: expinfrav1.RosaRoleConfigReadyCondition, + &clusterv1beta1.Condition{ + Type: expinfrav1beta1.RosaRoleConfigReadyCondition, Status: corev1.ConditionTrue, - Reason: expinfrav1.RosaRoleConfigCreatedReason, - Severity: clusterv1.ConditionSeverityInfo, + Reason: expinfrav1beta1.RosaRoleConfigCreatedReason, + Severity: clusterv1beta1.ConditionSeverityInfo, Message: "RosaRoleConfig is ready", }) } else { conditions.Set(scope.RosaRoleConfig, - &clusterv1.Condition{ - Type: expinfrav1.RosaRoleConfigReadyCondition, + &clusterv1beta1.Condition{ + Type: expinfrav1beta1.RosaRoleConfigReadyCondition, Status: corev1.ConditionFalse, - Reason: expinfrav1.RosaRoleConfigCreatedReason, - Severity: clusterv1.ConditionSeverityInfo, + Reason: expinfrav1beta1.RosaRoleConfigCreatedReason, + Severity: clusterv1beta1.ConditionSeverityInfo, Message: "RosaRoleConfig not ready", }) } @@ -175,17 +175,17 @@ func (r *ROSARoleConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque func (r *ROSARoleConfigReconciler) reconcileDelete(scope *scope.RosaRoleConfigScope) error { if err := 
r.deleteOperatorRoles(scope); err != nil { - conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionFailedReason, clusterv1.ConditionSeverityError, "Failed to delete operator roles: %v", err) + conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete operator roles: %v", err) return err } if err := r.deleteOIDC(scope); err != nil { - conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionFailedReason, clusterv1.ConditionSeverityError, "Failed to delete OIDC provider: %v", err) + conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete OIDC provider: %v", err) return err } if err := r.deleteAccountRoles(scope); err != nil { - conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionFailedReason, clusterv1.ConditionSeverityError, "Failed to delete account roles: %v", err) + conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete account roles: %v", err) return err } @@ -343,9 +343,11 @@ func (r *ROSARoleConfigReconciler) deleteAccountRoles(scope *scope.RosaRoleConfi // list all account role names. prefix := scope.RosaRoleConfig.Spec.AccountRoleConfig.Prefix hasSharedVpcPolicies := scope.RosaRoleConfig.Spec.AccountRoleConfig.SharedVPCConfig.IsSharedVPC() - roleNames := []string{fmt.Sprintf("%s%s", prefix, expinfrav1.HCPROSAInstallerRole), + roleNames := []string{ + fmt.Sprintf("%s%s", prefix, expinfrav1.HCPROSAInstallerRole), fmt.Sprintf("%s%s", prefix, expinfrav1.HCPROSASupportRole), - fmt.Sprintf("%s%s", prefix, expinfrav1.HCPROSAWorkerRole)} + fmt.Sprintf("%s%s", prefix, expinfrav1.HCPROSAWorkerRole), + } var errs []error for _, roleName := range roleNames { @@ -391,14 +393,16 @@ func (r *ROSARoleConfigReconciler) deleteOperatorRoles(scope *scope.RosaRoleConf } // list all operator role names. 
- roleNames := []string{fmt.Sprintf("%s%s", prefix, expinfrav1.ControlPlaneOperatorARNSuffix), + roleNames := []string{ + fmt.Sprintf("%s%s", prefix, expinfrav1.ControlPlaneOperatorARNSuffix), fmt.Sprintf("%s%s", prefix, expinfrav1.ImageRegistryARNSuffix), fmt.Sprintf("%s%s", prefix, expinfrav1.IngressOperatorARNSuffix), fmt.Sprintf("%s%s", prefix, expinfrav1.KMSProviderARNSuffix), fmt.Sprintf("%s%s", prefix, expinfrav1.KubeCloudControllerARNSuffix), fmt.Sprintf("%s%s", prefix, expinfrav1.NetworkARNSuffix), fmt.Sprintf("%s%s", prefix, expinfrav1.NodePoolManagementARNSuffix), - fmt.Sprintf("%s%s", prefix, expinfrav1.StorageARNSuffix)} + fmt.Sprintf("%s%s", prefix, expinfrav1.StorageARNSuffix), + } allSharedVpcPoliciesNotDeleted := make(map[string]bool) var errs []error diff --git a/exp/controllers/rosaroleconfig_controller_test.go b/exp/controllers/rosaroleconfig_controller_test.go index 8f6f370412..a27feb334d 100644 --- a/exp/controllers/rosaroleconfig_controller_test.go +++ b/exp/controllers/rosaroleconfig_controller_test.go @@ -45,8 +45,9 @@ import ( ctrl "sigs.k8s.io/controller-runtime" rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // generateTestID creates a unique identifier for test resources. @@ -390,7 +391,7 @@ func TestROSARoleConfigReconcileCreate(t *testing.T) { // Ready condition should be false. for _, condition := range updatedRoleConfig.Status.Conditions { - if condition.Type == expinfrav1.RosaRoleConfigReadyCondition { + if condition.Type == expinfrav1beta1.RosaRoleConfigReadyCondition { g.Expect(condition.Status).To(Equal(corev1.ConditionFalse)) break } @@ -626,10 +627,10 @@ func TestROSARoleConfigReconcileExist(t *testing.T) { g.Expect(updatedRoleConfig.Status.OperatorRolesRef.KMSProviderARN).To(Equal("arn:aws:iam::123456789012:role/test-kube-system-kms-provider")) // Should have a condition indicating success - expect Ready condition to be True - readyCondition := conditions.Get(updatedRoleConfig, expinfrav1.RosaRoleConfigReadyCondition) + readyCondition := conditions.Get(updatedRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition) g.Expect(readyCondition).ToNot(BeNil()) g.Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue)) - g.Expect(readyCondition.Reason).To(Equal(expinfrav1.RosaRoleConfigCreatedReason)) + g.Expect(readyCondition.Reason).To(Equal(expinfrav1beta1.RosaRoleConfigCreatedReason)) } func TestROSARoleConfigReconcileDelete(t *testing.T) { diff --git a/exp/controllers/suite_test.go b/exp/controllers/suite_test.go index 35f67b8f80..709f4d1f97 100644 --- a/exp/controllers/suite_test.go +++ b/exp/controllers/suite_test.go @@ -31,7 +31,7 @@ import ( rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // These tests use Ginkgo (BDD-style Go testing framework). 
Refer to diff --git a/exp/instancestate/suite_test.go b/exp/instancestate/suite_test.go index 28892be7ed..e1e9ecbb77 100644 --- a/exp/instancestate/suite_test.go +++ b/exp/instancestate/suite_test.go @@ -30,7 +30,7 @@ import ( expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/instancestate/mock_sqsiface" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // These tests use Ginkgo (BDD-style Go testing framework). Refer to diff --git a/go.mod b/go.mod index 64a4ec9b9e..7e02efa7be 100644 --- a/go.mod +++ b/go.mod @@ -42,7 +42,7 @@ require ( github.com/openshift-online/ocm-api-model/clientapi v0.0.431 github.com/openshift-online/ocm-common v0.0.31 github.com/openshift-online/ocm-sdk-go v0.1.476 - github.com/openshift/rosa v1.2.55 + github.com/openshift/rosa v1.2.57 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.23.0 github.com/sergi/go-diff v1.3.1 diff --git a/go.sum b/go.sum index c598a26d03..905462ce08 100644 --- a/go.sum +++ b/go.sum @@ -499,8 +499,8 @@ github.com/openshift-online/ocm-common v0.0.31 h1:csxB4UQAUhwhDOVBmOzUKgtemuwV9r github.com/openshift-online/ocm-common v0.0.31/go.mod h1:VEkuZp9aqbXtetZ5ycND6QpvhykvTuBF3oPsVM1X3vI= github.com/openshift-online/ocm-sdk-go v0.1.476 h1:l5gp/QEqnocqM02m7pDeS9ndXcCTBamewVSGaymd88Y= github.com/openshift-online/ocm-sdk-go v0.1.476/go.mod h1:ds+aOAlQbiK0ubZP3CwXkzd7m48v6fMQ1ef9UCrjzBY= -github.com/openshift/rosa v1.2.55 h1:Y6UD1474aExF4bZSh2KH4zE+Xl2NVsiuj3TLQGT9U+Y= -github.com/openshift/rosa v1.2.55/go.mod h1:EE0yTEjbwxfnH/9YbQZaUXUVbIzfPa9KCRNw19QdLsw= +github.com/openshift/rosa v1.2.57 h1:f2nZCEdQs0a1jadLvgM9Za36ilJq6z+IhteJNnNcTlc= +github.com/openshift/rosa v1.2.57/go.mod h1:kb6iV145TXhUWBONqlflNIYNbrcYGLk/SFZD6vNx4wM= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= diff --git a/hack/tools/third_party/conversion-gen/main.go b/hack/tools/third_party/conversion-gen/main.go index 15c29f3031..04b60c4fa1 100644 --- a/hack/tools/third_party/conversion-gen/main.go +++ b/hack/tools/third_party/conversion-gen/main.go @@ -135,4 +135,3 @@ func main() { } klog.V(2).Info("Completed successfully.") } - diff --git a/main.go b/main.go index c4b2a76496..f33fb17328 100644 --- a/main.go +++ b/main.go @@ -65,7 +65,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/version" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/flags" ) diff --git a/pkg/cloud/endpoints/partitions.go b/pkg/cloud/endpoints/partitions.go index 65ff2f0d6a..66c42917c3 100644 --- a/pkg/cloud/endpoints/partitions.go +++ b/pkg/cloud/endpoints/partitions.go @@ -129,6 +129,13 @@ var partitions = []Partition { SupportsFIPS: nil, SupportsDualStack: nil, }, + "ap-southeast-6": RegionOverrides{ + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, "ap-southeast-7": RegionOverrides{ Name: nil, DnsSuffix: nil, @@ -314,32 +321,18 @@ var partitions = []Partition { }, }, Partition { - ID: "aws-us-gov", - RegionRegex: 
"^us\\-gov\\-\\w+\\-\\d+$", + ID: "aws-eusc", + RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-us-gov", - DnsSuffix: "amazonaws.com", - DualStackDnsSuffix: "api.aws", + Name: "aws-eusc", + DnsSuffix: "amazonaws.eu", + DualStackDnsSuffix: "api.amazonwebservices.eu", SupportsFIPS: true, SupportsDualStack: true, - ImplicitGlobalRegion: "us-gov-west-1", + ImplicitGlobalRegion: "eusc-de-east-1", }, Regions: map[string]RegionOverrides { - "aws-us-gov-global": RegionOverrides{ - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-east-1": RegionOverrides{ - Name: nil, - DnsSuffix: nil, - DualStackDnsSuffix: nil, - SupportsFIPS: nil, - SupportsDualStack: nil, - }, - "us-gov-west-1": RegionOverrides{ + "eusc-de-east-1": RegionOverrides{ Name: nil, DnsSuffix: nil, DualStackDnsSuffix: nil, @@ -354,9 +347,9 @@ var partitions = []Partition { DefaultConfig: PartitionConfig{ Name: "aws-iso", DnsSuffix: "c2s.ic.gov", - DualStackDnsSuffix: "c2s.ic.gov", + DualStackDnsSuffix: "api.aws.ic.gov", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "us-iso-east-1", }, Regions: map[string]RegionOverrides { @@ -389,9 +382,9 @@ var partitions = []Partition { DefaultConfig: PartitionConfig{ Name: "aws-iso-b", DnsSuffix: "sc2s.sgov.gov", - DualStackDnsSuffix: "sc2s.sgov.gov", + DualStackDnsSuffix: "api.aws.scloud", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "us-isob-east-1", }, Regions: map[string]RegionOverrides { @@ -417,9 +410,9 @@ var partitions = []Partition { DefaultConfig: PartitionConfig{ Name: "aws-iso-e", DnsSuffix: "cloud.adc-e.uk", - DualStackDnsSuffix: "cloud.adc-e.uk", + DualStackDnsSuffix: "api.cloud-aws.adc-e.uk", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "eu-isoe-west-1", }, Regions: map[string]RegionOverrides { @@ -445,9 +438,9 @@ var partitions = []Partition { DefaultConfig: PartitionConfig{ Name: "aws-iso-f", DnsSuffix: "csp.hci.ic.gov", - DualStackDnsSuffix: "csp.hci.ic.gov", + DualStackDnsSuffix: "api.aws.hci.ic.gov", SupportsFIPS: true, - SupportsDualStack: false, + SupportsDualStack: true, ImplicitGlobalRegion: "us-isof-south-1", }, Regions: map[string]RegionOverrides { @@ -475,18 +468,32 @@ var partitions = []Partition { }, }, Partition { - ID: "aws-eusc", - RegionRegex: "^eusc\\-(de)\\-\\w+\\-\\d+$", + ID: "aws-us-gov", + RegionRegex: "^us\\-gov\\-\\w+\\-\\d+$", DefaultConfig: PartitionConfig{ - Name: "aws-eusc", - DnsSuffix: "amazonaws.eu", - DualStackDnsSuffix: "amazonaws.eu", + Name: "aws-us-gov", + DnsSuffix: "amazonaws.com", + DualStackDnsSuffix: "api.aws", SupportsFIPS: true, - SupportsDualStack: false, - ImplicitGlobalRegion: "eusc-de-east-1", + SupportsDualStack: true, + ImplicitGlobalRegion: "us-gov-west-1", }, Regions: map[string]RegionOverrides { - "eusc-de-east-1": RegionOverrides{ + "aws-us-gov-global": RegionOverrides{ + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-east-1": RegionOverrides{ + Name: nil, + DnsSuffix: nil, + DualStackDnsSuffix: nil, + SupportsFIPS: nil, + SupportsDualStack: nil, + }, + "us-gov-west-1": RegionOverrides{ Name: nil, DnsSuffix: nil, DualStackDnsSuffix: nil, diff --git a/pkg/cloud/interfaces.go b/pkg/cloud/interfaces.go index d4eaaf2994..1f4fb37091 100644 --- a/pkg/cloud/interfaces.go +++ b/pkg/cloud/interfaces.go @@ 
-27,8 +27,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // Session represents an AWS session. @@ -70,7 +70,7 @@ type ClusterScoper interface { InfraCluster() ClusterObject // Cluster returns the cluster object. - ClusterObj() ClusterObject + ClusterObj() *clusterv1.Cluster // UnstructuredControlPlane returns the unstructured control plane object. UnstructuredControlPlane() (*unstructured.Unstructured, error) @@ -84,7 +84,7 @@ type ClusterScoper interface { // AdditionalTags returns any tags that you would like to attach to AWS resources. The returned value will never be nil. AdditionalTags() infrav1.Tags // SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input. - SetFailureDomain(id string, spec clusterv1.FailureDomainSpec) + SetFailureDomain(id string, spec clusterv1.FailureDomain) // PatchObject persists the cluster configuration and status. PatchObject() error // Close closes the current scope persisting the cluster configuration and status. diff --git a/pkg/cloud/scope/cluster.go b/pkg/cloud/scope/cluster.go index 5fb6b04bce..8f3255a1cd 100644 --- a/pkg/cloud/scope/cluster.go +++ b/pkg/cloud/scope/cluster.go @@ -27,14 +27,16 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ClusterScopeParams defines the input parameters used to create a new Scope. @@ -250,26 +252,26 @@ func (s *ClusterScope) ListOptionsLabelSelector() client.ListOption { func (s *ClusterScope) PatchObject() error { // Always update the readyCondition by summarizing the state of other conditions. // A step counter is added to represent progress during the provisioning process (instead we are hiding during the deletion process). 
- applicableConditions := []clusterv1.ConditionType{ - infrav1.VpcReadyCondition, - infrav1.SubnetsReadyCondition, - infrav1.ClusterSecurityGroupsReadyCondition, - infrav1.LoadBalancerReadyCondition, + applicableConditions := []clusterv1beta1.ConditionType{ + infrav1beta1.VpcReadyCondition, + infrav1beta1.SubnetsReadyCondition, + infrav1beta1.ClusterSecurityGroupsReadyCondition, + infrav1beta1.LoadBalancerReadyCondition, } if s.VPC().IsManaged(s.Name()) { applicableConditions = append(applicableConditions, - infrav1.InternetGatewayReadyCondition, - infrav1.NatGatewaysReadyCondition, - infrav1.RouteTablesReadyCondition, - infrav1.VpcEndpointsReadyCondition, + infrav1beta1.InternetGatewayReadyCondition, + infrav1beta1.NatGatewaysReadyCondition, + infrav1beta1.RouteTablesReadyCondition, + infrav1beta1.VpcEndpointsReadyCondition, ) if s.AWSCluster.Spec.Bastion.Enabled { - applicableConditions = append(applicableConditions, infrav1.BastionHostReadyCondition) + applicableConditions = append(applicableConditions, infrav1beta1.BastionHostReadyCondition) } if s.VPC().IsIPv6Enabled() { - applicableConditions = append(applicableConditions, infrav1.EgressOnlyInternetGatewayReadyCondition) + applicableConditions = append(applicableConditions, infrav1beta1.EgressOnlyInternetGatewayReadyCondition) } } @@ -282,20 +284,20 @@ func (s *ClusterScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.AWSCluster, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - clusterv1.ReadyCondition, - infrav1.VpcReadyCondition, - infrav1.SubnetsReadyCondition, - infrav1.InternetGatewayReadyCondition, - infrav1.EgressOnlyInternetGatewayReadyCondition, - infrav1.NatGatewaysReadyCondition, - infrav1.RouteTablesReadyCondition, - infrav1.VpcEndpointsReadyCondition, - infrav1.ClusterSecurityGroupsReadyCondition, - infrav1.BastionHostReadyCondition, - infrav1.LoadBalancerReadyCondition, - infrav1.PrincipalUsageAllowedCondition, - infrav1.PrincipalCredentialRetrievedCondition, + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + clusterv1beta1.ReadyCondition, + infrav1beta1.VpcReadyCondition, + infrav1beta1.SubnetsReadyCondition, + infrav1beta1.InternetGatewayReadyCondition, + infrav1beta1.EgressOnlyInternetGatewayReadyCondition, + infrav1beta1.NatGatewaysReadyCondition, + infrav1beta1.RouteTablesReadyCondition, + infrav1beta1.VpcEndpointsReadyCondition, + infrav1beta1.ClusterSecurityGroupsReadyCondition, + infrav1beta1.BastionHostReadyCondition, + infrav1beta1.LoadBalancerReadyCondition, + infrav1beta1.PrincipalUsageAllowedCondition, + infrav1beta1.PrincipalCredentialRetrievedCondition, }}) } @@ -315,16 +317,16 @@ func (s *ClusterScope) AdditionalTags() infrav1.Tags { // APIServerPort returns the APIServerPort to use when creating the load balancer. func (s *ClusterScope) APIServerPort() int32 { - if s.Cluster.Spec.ClusterNetwork != nil && s.Cluster.Spec.ClusterNetwork.APIServerPort != nil { - return *s.Cluster.Spec.ClusterNetwork.APIServerPort + if s.Cluster.Spec.ClusterNetwork.APIServerPort != 0 { + return s.Cluster.Spec.ClusterNetwork.APIServerPort } return infrav1.DefaultAPIServerPort } // SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input. 
-func (s *ClusterScope) SetFailureDomain(id string, spec clusterv1.FailureDomainSpec) { +func (s *ClusterScope) SetFailureDomain(id string, spec clusterv1.FailureDomain) { if s.AWSCluster.Status.FailureDomains == nil { - s.AWSCluster.Status.FailureDomains = make(clusterv1.FailureDomains) + s.AWSCluster.Status.FailureDomains = make(map[string]clusterv1.FailureDomain) } s.AWSCluster.Status.FailureDomains[id] = spec } @@ -345,7 +347,7 @@ func (s *ClusterScope) InfraCluster() cloud.ClusterObject { } // ClusterObj returns the cluster object. -func (s *ClusterScope) ClusterObj() cloud.ClusterObject { +func (s *ClusterScope) ClusterObj() *clusterv1.Cluster { return s.Cluster } diff --git a/pkg/cloud/scope/elb.go b/pkg/cloud/scope/elb.go index 4e3ce80c4f..8d092a92ee 100644 --- a/pkg/cloud/scope/elb.go +++ b/pkg/cloud/scope/elb.go @@ -19,7 +19,8 @@ package scope import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) // ELBScope is a scope for use with the ELB reconciling service. type ELBScope interface { diff --git a/pkg/cloud/scope/fargate.go b/pkg/cloud/scope/fargate.go index 73eec70484..27f67557d3 100644 --- a/pkg/cloud/scope/fargate.go +++ b/pkg/cloud/scope/fargate.go @@ -26,14 +26,16 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // FargateProfileScopeParams defines the input parameters used to create a new Scope. @@ -168,13 +170,13 @@ func (s *FargateProfileScope) Partition() string { // IAMReadyFalse marks the ready condition false using warning if error isn't // empty. 
func (s *FargateProfileScope) IAMReadyFalse(reason string, err string) error { - severity := clusterv1.ConditionSeverityWarning + severity := clusterv1beta1.ConditionSeverityWarning if err == "" { - severity = clusterv1.ConditionSeverityInfo + severity = clusterv1beta1.ConditionSeverityInfo } conditions.MarkFalse( s.FargateProfile, - expinfrav1.IAMFargateRolesReadyCondition, + expinfrav1beta1.IAMFargateRolesReadyCondition, reason, severity, "%s", @@ -191,11 +193,11 @@ func (s *FargateProfileScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.FargateProfile, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - expinfrav1.EKSFargateProfileReadyCondition, - expinfrav1.EKSFargateCreatingCondition, - expinfrav1.EKSFargateDeletingCondition, - expinfrav1.IAMFargateRolesReadyCondition, + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + expinfrav1beta1.EKSFargateProfileReadyCondition, + expinfrav1beta1.EKSFargateCreatingCondition, + expinfrav1beta1.EKSFargateDeletingCondition, + expinfrav1beta1.IAMFargateRolesReadyCondition, }}) } @@ -210,7 +212,7 @@ func (s *FargateProfileScope) InfraCluster() cloud.ClusterObject { } // ClusterObj returns the cluster object. -func (s *FargateProfileScope) ClusterObj() cloud.ClusterObject { +func (s *FargateProfileScope) ClusterObj() *clusterv1.Cluster { return s.Cluster } diff --git a/pkg/cloud/scope/launchtemplate.go b/pkg/cloud/scope/launchtemplate.go index 5551a33325..783597282e 100644 --- a/pkg/cloud/scope/launchtemplate.go +++ b/pkg/cloud/scope/launchtemplate.go @@ -24,8 +24,8 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // LaunchTemplateScope defines a scope defined around a launch template. diff --git a/pkg/cloud/scope/machine.go b/pkg/cloud/scope/machine.go index 756ab2d188..f905d6254d 100644 --- a/pkg/cloud/scope/machine.go +++ b/pkg/cloud/scope/machine.go @@ -27,15 +27,17 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/annotations" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // MachineScopeParams defines the input parameters used to create a new MachineScope. @@ -302,13 +304,13 @@ func (m *MachineScope) GetRawBootstrapDataWithFormat() ([]byte, string, error) { func (m *MachineScope) PatchObject() error { // Always update the readyCondition by summarizing the state of other conditions. 
// A step counter is added to represent progress during the provisioning process (instead we are hiding during the deletion process). - applicableConditions := []clusterv1.ConditionType{ - infrav1.InstanceReadyCondition, - infrav1.SecurityGroupsReadyCondition, + applicableConditions := []clusterv1beta1.ConditionType{ + infrav1beta1.InstanceReadyCondition, + infrav1beta1.SecurityGroupsReadyCondition, } if m.IsControlPlane() { - applicableConditions = append(applicableConditions, infrav1.ELBAttachedCondition) + applicableConditions = append(applicableConditions, infrav1beta1.ELBAttachedCondition) } conditions.SetSummary(m.AWSMachine, @@ -320,11 +322,11 @@ func (m *MachineScope) PatchObject() error { return m.patchHelper.Patch( context.TODO(), m.AWSMachine, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ clusterv1.ReadyCondition, - infrav1.InstanceReadyCondition, - infrav1.SecurityGroupsReadyCondition, - infrav1.ELBAttachedCondition, + infrav1beta1.InstanceReadyCondition, + infrav1beta1.SecurityGroupsReadyCondition, + infrav1beta1.ELBAttachedCondition, }}) } @@ -354,19 +356,19 @@ func (m *MachineScope) HasFailed() bool { // InstanceIsRunning returns the instance state of the machine scope. func (m *MachineScope) InstanceIsRunning() bool { state := m.GetInstanceState() - return state != nil && infrav1.InstanceRunningStates.Has(string(*state)) + return state != nil && infrav1beta1.InstanceRunningStates.Has(string(*state)) } // InstanceIsOperational returns the operational state of the machine scope. func (m *MachineScope) InstanceIsOperational() bool { state := m.GetInstanceState() - return state != nil && infrav1.InstanceOperationalStates.Has(string(*state)) + return state != nil && infrav1beta1.InstanceOperationalStates.Has(string(*state)) } // InstanceIsInKnownState checks if the machine scope's instance state is known. func (m *MachineScope) InstanceIsInKnownState() bool { state := m.GetInstanceState() - return state != nil && infrav1.InstanceKnownStates.Has(string(*state)) + return state != nil && infrav1beta1.InstanceKnownStates.Has(string(*state)) } // AWSMachineIsDeleted checks if the AWS machine was deleted. 
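The machine- and cluster-scope hunks above all follow the same split: the objects themselves move to the v1beta2 clusterv1 API, while their conditions keep the v1beta1 shape, with helpers now imported from util/deprecated/v1beta1/conditions and the condition/reason constants sourced from the provider's own v1beta1 packages. For readers tracking the mechanics, the following is a rough, self-contained sketch of the upsert behavior that conditions.MarkFalse provides; the types and names here are simplified stand-ins, not the real CAPI ones.

package main

import "fmt"

// Simplified stand-ins for clusterv1beta1.ConditionSeverity and Condition;
// the real types live in sigs.k8s.io/cluster-api/api/core/v1beta1.
type ConditionSeverity string

const (
	ConditionSeverityError ConditionSeverity = "Error"
	ConditionSeverityInfo  ConditionSeverity = "Info"
)

type Condition struct {
	Type     string
	Status   string
	Reason   string
	Severity ConditionSeverity
	Message  string
}

// markFalse approximates conditions.MarkFalse: set (or overwrite) the typed
// condition to status False with the given reason, severity, and message.
func markFalse(conds []Condition, condType, reason string, sev ConditionSeverity, format string, args ...interface{}) []Condition {
	c := Condition{
		Type:     condType,
		Status:   "False",
		Reason:   reason,
		Severity: sev,
		Message:  fmt.Sprintf(format, args...),
	}
	for i := range conds {
		if conds[i].Type == condType {
			conds[i] = c
			return conds
		}
	}
	return append(conds, c)
}

func main() {
	var conds []Condition
	conds = markFalse(conds, "InstanceReady", "InstanceProvisionFailed", ConditionSeverityError, "%s", "attach failed")
	fmt.Printf("%+v\n", conds[0])
}

Keeping the deprecated helpers in place lets controllers migrate object types to v1beta2 first and condition types later, which is exactly the staging this series uses.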
diff --git a/pkg/cloud/scope/machine_test.go b/pkg/cloud/scope/machine_test.go index 6c509cd782..9e219fedbc 100644 --- a/pkg/cloud/scope/machine_test.go +++ b/pkg/cloud/scope/machine_test.go @@ -28,7 +28,8 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client/fake" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) func setupScheme() (*runtime.Scheme, error) { scheme := runtime.NewScheme() diff --git a/pkg/cloud/scope/machinepool.go b/pkg/cloud/scope/machinepool.go index 1afe0172d3..a27524f10e 100644 --- a/pkg/cloud/scope/machinepool.go +++ b/pkg/cloud/scope/machinepool.go @@ -32,13 +32,15 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/remote" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // MachinePoolScope defines a scope defined around a machine and its cluster. @@ -174,9 +176,9 @@ func (m *MachinePoolScope) PatchObject() error { return m.patchHelper.Patch( context.TODO(), m.AWSMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - expinfrav1.ASGReadyCondition, - expinfrav1.LaunchTemplateReadyCondition, + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + expinfrav1beta1.ASGReadyCondition, + expinfrav1beta1.LaunchTemplateReadyCondition, }}) } diff --git a/pkg/cloud/scope/managedcontrolplane.go b/pkg/cloud/scope/managedcontrolplane.go index 3e0ddad6a8..af268cf08b 100644 --- a/pkg/cloud/scope/managedcontrolplane.go +++ b/pkg/cloud/scope/managedcontrolplane.go @@ -32,20 +32,21 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + ekscontrolplanev1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta1" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/remote" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) -var ( - scheme = runtime.NewScheme() -) +var scheme = runtime.NewScheme() func init() { _ = amazoncni.AddToScheme(scheme) @@ -268,20 +269,20 @@ func (s *ManagedControlPlaneScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ControlPlane, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - 
infrav1.VpcReadyCondition, - infrav1.SubnetsReadyCondition, - infrav1.ClusterSecurityGroupsReadyCondition, - infrav1.InternetGatewayReadyCondition, - infrav1.NatGatewaysReadyCondition, - infrav1.RouteTablesReadyCondition, - infrav1.VpcEndpointsReadyCondition, - infrav1.BastionHostReadyCondition, - infrav1.EgressOnlyInternetGatewayReadyCondition, - ekscontrolplanev1.EKSControlPlaneCreatingCondition, - ekscontrolplanev1.EKSControlPlaneReadyCondition, - ekscontrolplanev1.EKSControlPlaneUpdatingCondition, - ekscontrolplanev1.IAMControlPlaneRolesReadyCondition, + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + infrav1beta1.VpcReadyCondition, + infrav1beta1.SubnetsReadyCondition, + infrav1beta1.ClusterSecurityGroupsReadyCondition, + infrav1beta1.InternetGatewayReadyCondition, + infrav1beta1.NatGatewaysReadyCondition, + infrav1beta1.RouteTablesReadyCondition, + infrav1beta1.VpcEndpointsReadyCondition, + infrav1beta1.BastionHostReadyCondition, + infrav1beta1.EgressOnlyInternetGatewayReadyCondition, + ekscontrolplanev1beta1.EKSControlPlaneCreatingCondition, + ekscontrolplanev1beta1.EKSControlPlaneReadyCondition, + ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition, + ekscontrolplanev1beta1.IAMControlPlaneRolesReadyCondition, }}) } @@ -305,9 +306,9 @@ func (s *ManagedControlPlaneScope) APIServerPort() int32 { } // SetFailureDomain sets the infrastructure provider failure domain key to the spec given as input. -func (s *ManagedControlPlaneScope) SetFailureDomain(id string, spec clusterv1.FailureDomainSpec) { +func (s *ManagedControlPlaneScope) SetFailureDomain(id string, spec clusterv1.FailureDomain) { if s.ControlPlane.Status.FailureDomains == nil { - s.ControlPlane.Status.FailureDomains = make(clusterv1.FailureDomains) + s.ControlPlane.Status.FailureDomains = make(map[string]clusterv1.FailureDomain) } s.ControlPlane.Status.FailureDomains[id] = spec } @@ -318,7 +319,7 @@ func (s *ManagedControlPlaneScope) InfraCluster() cloud.ClusterObject { } // ClusterObj returns the cluster object. -func (s *ManagedControlPlaneScope) ClusterObj() cloud.ClusterObject { +func (s *ManagedControlPlaneScope) ClusterObj() *clusterv1.Cluster { return s.Cluster } @@ -447,12 +448,8 @@ func (s *ManagedControlPlaneScope) OIDCIdentityProviderConfig() *ekscontrolplane // ServiceCidrs returns the CIDR blocks used for services. 
func (s *ManagedControlPlaneScope) ServiceCidrs() *clusterv1.NetworkRanges { - if s.Cluster.Spec.ClusterNetwork != nil { - if s.Cluster.Spec.ClusterNetwork.Services != nil { - if len(s.Cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 { - return s.Cluster.Spec.ClusterNetwork.Services - } - } + if len(s.Cluster.Spec.ClusterNetwork.Services.CIDRBlocks) > 0 { + return &s.Cluster.Spec.ClusterNetwork.Services } return nil diff --git a/pkg/cloud/scope/managednodegroup.go b/pkg/cloud/scope/managednodegroup.go index c8851c500d..946d5496a1 100644 --- a/pkg/cloud/scope/managednodegroup.go +++ b/pkg/cloud/scope/managednodegroup.go @@ -32,14 +32,16 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/endpoints" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ManagedMachinePoolScopeParams defines the input parameters used to create a new Scope. @@ -200,7 +202,10 @@ func (s *ManagedMachinePoolScope) RoleName() string { // Version returns the nodegroup Kubernetes version. func (s *ManagedMachinePoolScope) Version() *string { - return s.MachinePool.Spec.Template.Spec.Version + if s.MachinePool.Spec.Template.Spec.Version == "" { + return nil + } + return &s.MachinePool.Spec.Template.Spec.Version } // ControlPlaneSubnets returns the control plane subnets. @@ -227,13 +232,13 @@ func (s *ManagedMachinePoolScope) SubnetIDs() ([]string, error) { // NodegroupReadyFalse marks the ready condition false using warning if error isn't // empty. func (s *ManagedMachinePoolScope) NodegroupReadyFalse(reason string, err string) error { - severity := clusterv1.ConditionSeverityWarning + severity := clusterv1beta1.ConditionSeverityWarning if err == "" { - severity = clusterv1.ConditionSeverityInfo + severity = clusterv1beta1.ConditionSeverityInfo } conditions.MarkFalse( s.ManagedMachinePool, - expinfrav1.EKSNodegroupReadyCondition, + expinfrav1beta1.EKSNodegroupReadyCondition, reason, severity, "%s", @@ -248,13 +253,13 @@ func (s *ManagedMachinePoolScope) NodegroupReadyFalse(reason string, err string) // IAMReadyFalse marks the ready condition false using warning if error isn't // empty. 
func (s *ManagedMachinePoolScope) IAMReadyFalse(reason string, err string) error { - severity := clusterv1.ConditionSeverityWarning + severity := clusterv1beta1.ConditionSeverityWarning if err == "" { - severity = clusterv1.ConditionSeverityInfo + severity = clusterv1beta1.ConditionSeverityInfo } conditions.MarkFalse( s.ManagedMachinePool, - expinfrav1.IAMNodegroupRolesReadyCondition, + expinfrav1beta1.IAMNodegroupRolesReadyCondition, reason, severity, "%s", @@ -271,9 +276,9 @@ func (s *ManagedMachinePoolScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ManagedMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - expinfrav1.EKSNodegroupReadyCondition, - expinfrav1.IAMNodegroupRolesReadyCondition, + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + expinfrav1beta1.EKSNodegroupReadyCondition, + expinfrav1beta1.IAMNodegroupRolesReadyCondition, }}) } @@ -296,7 +301,7 @@ func (s *ManagedMachinePoolScope) InfraCluster() cloud.ClusterObject { } // ClusterObj returns the cluster object. -func (s *ManagedMachinePoolScope) ClusterObj() cloud.ClusterObject { +func (s *ManagedMachinePoolScope) ClusterObj() *clusterv1.Cluster { return s.Cluster } diff --git a/pkg/cloud/scope/rosacontrolplane.go b/pkg/cloud/scope/rosacontrolplane.go index c292aa59c2..21b78080e9 100644 --- a/pkg/cloud/scope/rosacontrolplane.go +++ b/pkg/cloud/scope/rosacontrolplane.go @@ -30,13 +30,15 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + rosacontrolplanev1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta1" rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" stsservice "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ROSAControlPlaneScopeParams defines the input parameters used to create a new ROSAControlPlaneScope. @@ -138,9 +140,11 @@ func (s *ROSAControlPlaneScope) ControllerName() string { return s.controllerName } -var _ cloud.ScopeUsage = (*ROSAControlPlaneScope)(nil) -var _ cloud.Session = (*ROSAControlPlaneScope)(nil) -var _ cloud.SessionMetadata = (*ROSAControlPlaneScope)(nil) +var ( + _ cloud.ScopeUsage = (*ROSAControlPlaneScope)(nil) + _ cloud.Session = (*ROSAControlPlaneScope)(nil) + _ cloud.SessionMetadata = (*ROSAControlPlaneScope)(nil) +) // Name returns the CAPI cluster name. 
func (s *ROSAControlPlaneScope) Name() string { @@ -212,10 +216,10 @@ func (s *ROSAControlPlaneScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ControlPlane, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - rosacontrolplanev1.ROSAControlPlaneReadyCondition, - rosacontrolplanev1.ROSAControlPlaneValidCondition, - rosacontrolplanev1.ROSAControlPlaneUpgradingCondition, + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition, + rosacontrolplanev1beta1.ROSAControlPlaneValidCondition, + rosacontrolplanev1beta1.ROSAControlPlaneUpgradingCondition, }}) } diff --git a/pkg/cloud/scope/rosamachinepool.go b/pkg/cloud/scope/rosamachinepool.go index 130e091135..2f7ad536a4 100644 --- a/pkg/cloud/scope/rosamachinepool.go +++ b/pkg/cloud/scope/rosamachinepool.go @@ -26,13 +26,15 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // RosaMachinePoolScopeParams defines the input parameters used to create a new Scope. @@ -142,7 +144,7 @@ func (s *RosaMachinePoolScope) InfraCluster() cloud.ClusterObject { } // ClusterObj returns the cluster object. -func (s *RosaMachinePoolScope) ClusterObj() cloud.ClusterObject { +func (s *RosaMachinePoolScope) ClusterObj() *clusterv1.Cluster { return s.Cluster } @@ -188,13 +190,13 @@ func (s *RosaMachinePoolScope) Namespace() string { // RosaMachinePoolReadyFalse marks the ready condition false using warning if error isn't // empty. 
func (s *RosaMachinePoolScope) RosaMachinePoolReadyFalse(reason string, err string) error { - severity := clusterv1.ConditionSeverityWarning + severity := clusterv1beta1.ConditionSeverityWarning if err == "" { - severity = clusterv1.ConditionSeverityInfo + severity = clusterv1beta1.ConditionSeverityInfo } conditions.MarkFalse( s.RosaMachinePool, - expinfrav1.RosaMachinePoolReadyCondition, + expinfrav1beta1.RosaMachinePoolReadyCondition, reason, severity, "%s", @@ -211,8 +213,8 @@ func (s *RosaMachinePoolScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.RosaMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - expinfrav1.RosaMachinePoolReadyCondition, + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + expinfrav1beta1.RosaMachinePoolReadyCondition, }}) } diff --git a/pkg/cloud/scope/rosanetwork.go b/pkg/cloud/scope/rosanetwork.go index bf455393b4..7b8727de99 100644 --- a/pkg/cloud/scope/rosanetwork.go +++ b/pkg/cloud/scope/rosanetwork.go @@ -25,12 +25,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/patch" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ROSANetworkScopeParams defines the input parameters used to create a new ROSANetworkScope. @@ -130,7 +131,7 @@ func (s *ROSANetworkScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ROSANetwork, - patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{ - expinfrav1.ROSANetworkReadyCondition, + patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + expinfrav1beta1.ROSANetworkReadyCondition, }}) } diff --git a/pkg/cloud/scope/rosaroleconfig.go b/pkg/cloud/scope/rosaroleconfig.go index bc9edbbb2b..91c1eaea2c 100644 --- a/pkg/cloud/scope/rosaroleconfig.go +++ b/pkg/cloud/scope/rosaroleconfig.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - "sigs.k8s.io/cluster-api/util/patch" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // RosaRoleConfigScopeParams defines the input parameters used to create a new RosaRoleConfigScope. 
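Every PatchObject in these scope files now takes patch.WithOwnedConditions from util/deprecated/v1beta1/patch, listing the condition types this controller is authoritative for. A toy model of the intent, under simplified assumptions (the real helper computes a three-way merge against the object as it looked when the helper was created):

package main

import "fmt"

type Condition struct {
	Type   string
	Status string
}

// mergeOwned models the intent of patch.WithOwnedConditions: for condition
// types the caller owns, the local value always wins; everything else keeps
// whatever the server currently has, so other controllers' conditions are
// not clobbered.
func mergeOwned(server, local []Condition, owned []string) []Condition {
	isOwned := make(map[string]bool, len(owned))
	for _, t := range owned {
		isOwned[t] = true
	}
	byType := make(map[string]Condition, len(server))
	order := []string{}
	for _, c := range server {
		byType[c.Type] = c
		order = append(order, c.Type)
	}
	for _, c := range local {
		if !isOwned[c.Type] {
			continue
		}
		if _, ok := byType[c.Type]; !ok {
			order = append(order, c.Type)
		}
		byType[c.Type] = c
	}
	out := make([]Condition, 0, len(order))
	for _, t := range order {
		out = append(out, byType[t])
	}
	return out
}

func main() {
	server := []Condition{{"Ready", "False"}, {"Paused", "True"}}
	local := []Condition{{"Ready", "True"}, {"Paused", "False"}}
	// Only "Ready" is owned, so the local "Paused" value is ignored.
	fmt.Println(mergeOwned(server, local, []string{"Ready"}))
}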
diff --git a/pkg/cloud/scope/session.go b/pkg/cloud/scope/session.go
index 5f1adab09f..a4bac26e77 100644
--- a/pkg/cloud/scope/session.go
+++ b/pkg/cloud/scope/session.go
@@ -34,16 +34,17 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
+	infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/identity"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/util/system"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/conditions"
-	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 )
 
 const (
@@ -91,7 +92,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se
 	providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScoper, region, log)
 	if err != nil {
 		// could not get providers and retrieve the credentials
-		conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.PrincipalCredentialRetrievalFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+		conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1beta1.PrincipalCredentialRetrievedCondition, infrav1beta1.PrincipalCredentialRetrievalFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return nil, nil, errors.Wrap(err, "Failed to get providers for cluster")
 	}
 
@@ -129,7 +130,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se
 		// Check if identity credentials can be retrieved. One reason this will fail is that source identity is not authorized for assume role.
 		_, err := providers[0].Retrieve(context.Background())
 		if err != nil {
-			conditions.MarkUnknown(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition, infrav1.CredentialProviderBuildFailedReason, "%s", err.Error())
+			conditions.MarkUnknown(clusterScoper.InfraCluster(), infrav1beta1.PrincipalCredentialRetrievedCondition, infrav1beta1.CredentialProviderBuildFailedReason, "%s", err.Error())
 
 			// delete the existing session from cache. Otherwise, we give back a defective session on next method invocation with same cluster scope
 			sessionCache.Delete(getSessionName(region, clusterScoper))
@@ -140,7 +141,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se
 		optFns = append(optFns, config.WithCredentialsProvider(chainProvider))
 	}
 
-	conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalCredentialRetrievedCondition)
+	conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1beta1.PrincipalCredentialRetrievedCondition)
 
 	ns, err := config.LoadDefaultConfig(context.Background(), optFns...)
 	if err != nil {
@@ -288,21 +289,21 @@ func buildProvidersForRef(
 	default:
 		return providers, errors.Errorf("No such provider known: '%s'", ref.Kind)
 	}
-	conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition)
+	conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition)
 	return providers, nil
 }
 
 func setPrincipalUsageAllowedCondition(clusterScoper cloud.SessionMetadata) {
-	conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition)
+	conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition)
 }
 
 func setPrincipalUsageNotAllowedCondition(kind infrav1.AWSIdentityKind, identityObjectKey client.ObjectKey, clusterScoper cloud.SessionMetadata) {
 	errMsg := fmt.Sprintf(notPermittedError, kind, identityObjectKey.Name)
 	if clusterScoper.IdentityRef().Name == identityObjectKey.Name {
-		conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.PrincipalUsageUnauthorizedReason, clusterv1.ConditionSeverityError, "%s", errMsg)
+		conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition, infrav1beta1.PrincipalUsageUnauthorizedReason, clusterv1beta1.ConditionSeverityError, "%s", errMsg)
 	} else {
-		conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1.PrincipalUsageAllowedCondition, infrav1.SourcePrincipalUsageUnauthorizedReason, clusterv1.ConditionSeverityError, "%s", errMsg)
+		conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition, infrav1beta1.SourcePrincipalUsageUnauthorizedReason, clusterv1beta1.ConditionSeverityError, "%s", errMsg)
 	}
 }
 
diff --git a/pkg/cloud/scope/session_test.go b/pkg/cloud/scope/session_test.go
index 5665eac46b..39dc491d84 100644
--- a/pkg/cloud/scope/session_test.go
+++ b/pkg/cloud/scope/session_test.go
@@ -34,7 +34,8 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/identity"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/util/system"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1")
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+)
 
 func TestIsClusterPermittedToUsePrincipal(t *testing.T) {
 	testCases := []struct {
diff --git a/pkg/cloud/scope/shared.go b/pkg/cloud/scope/shared.go
index 2a4fabb487..e7f5e90e6b 100644
--- a/pkg/cloud/scope/shared.go
+++ b/pkg/cloud/scope/shared.go
@@ -27,7 +27,7 @@ import (
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/controllers/external"
 )
 
@@ -136,15 +136,15 @@ func (p *defaultSubnetPlacementStrategy) getSubnetsForAZs(azs []string, controlP
 
 // getUnstructuredControlPlane returns the unstructured object for the control plane, if any.
 // When the reference is not set, it returns an empty object.
 func getUnstructuredControlPlane(ctx context.Context, client client.Client, cluster *clusterv1.Cluster) (*unstructured.Unstructured, error) {
-	if cluster.Spec.ControlPlaneRef == nil {
+	if cluster.Spec.ControlPlaneRef.Name == "" {
 		// If the control plane ref is not set, return an empty object.
 		// Not having a control plane ref is valid given API contracts.
 		return &unstructured.Unstructured{}, nil
 	}
 
-	u, err := external.Get(ctx, client, cluster.Spec.ControlPlaneRef)
+	u, err := external.GetObjectFromContractVersionedRef(ctx, client, cluster.Spec.ControlPlaneRef, cluster.Namespace)
 	if err != nil {
-		return nil, errors.Wrapf(err, "failed to retrieve control plane object %s/%s", cluster.Spec.ControlPlaneRef.Namespace, cluster.Spec.ControlPlaneRef.Name)
+		return nil, errors.Wrapf(err, "failed to retrieve control plane object %s/%s", cluster.Namespace, cluster.Spec.ControlPlaneRef.Name)
 	}
 	return u, nil
 }
diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup.go b/pkg/cloud/services/autoscaling/autoscalinggroup.go
index 8067c9c673..123065fc24 100644
--- a/pkg/cloud/services/autoscaling/autoscalinggroup.go
+++ b/pkg/cloud/services/autoscaling/autoscalinggroup.go
@@ -33,6 +33,7 @@ import (
 	"k8s.io/utils/ptr"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+	expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
@@ -147,7 +148,7 @@ func (s *Service) ASGIfExists(name *string) (*expinfrav1.AutoScalingGroup, error
 		record.Eventf(s.scope.InfraCluster(), "FailedDescribeAutoScalingGroups", "failed to describe ASG %q: %v", *name, err)
 		return nil, errors.Wrapf(err, "failed to describe AutoScaling Group: %q", *name)
 	case len(out.AutoScalingGroups) == 0:
-		record.Eventf(s.scope.InfraCluster(), expinfrav1.ASGNotFoundReason, "Unable to find ASG matching %q", *name)
+		record.Eventf(s.scope.InfraCluster(), expinfrav1beta1.ASGNotFoundReason, "Unable to find ASG matching %q", *name)
 		return nil, nil
 	}
 	return s.SDKToAutoScalingGroup(&out.AutoScalingGroups[0])
diff --git a/pkg/cloud/services/autoscaling/autoscalinggroup_test.go b/pkg/cloud/services/autoscaling/autoscalinggroup_test.go
index dbdc29a261..392fbfc93f 100644
--- a/pkg/cloud/services/autoscaling/autoscalinggroup_test.go
+++ b/pkg/cloud/services/autoscaling/autoscalinggroup_test.go
@@ -42,7 +42,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/autoscaling/mock_autoscalingiface"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 func TestServiceGetASGByName(t *testing.T) {
diff --git a/pkg/cloud/services/autoscaling/lifecyclehook.go b/pkg/cloud/services/autoscaling/lifecyclehook.go
index 61d194e7b5..2183631b16 100644
--- a/pkg/cloud/services/autoscaling/lifecyclehook.go
+++ b/pkg/cloud/services/autoscaling/lifecyclehook.go
@@ -30,8 +30,8 @@ import (
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	deprecatedv1beta1conditions "sigs.k8s.io/cluster-api/util/conditions/deprecated/v1beta1"
 )
 
 // DescribeLifecycleHooks returns the lifecycle hooks for the given AutoScalingGroup after retrieving them from the AWS API.
@@ -160,7 +160,7 @@ func getLifecycleHookSpecificationList(lifecycleHooks []expinfrav1.AWSLifecycleH
 // by creating missing hooks, updating mismatching hooks and
 // deleting extraneous hooks (except those specified in
 // ignoreLifecycleHooks).
-func ReconcileLifecycleHooks(ctx context.Context, asgService services.ASGInterface, asgName string, wantedLifecycleHooks []expinfrav1.AWSLifecycleHook, ignoreLifecycleHooks map[string]bool, storeConditionsOnObject conditions.Setter, log logger.Wrapper) error {
+func ReconcileLifecycleHooks(ctx context.Context, asgService services.ASGInterface, asgName string, wantedLifecycleHooks []expinfrav1.AWSLifecycleHook, ignoreLifecycleHooks map[string]bool, storeConditionsOnObject deprecatedv1beta1conditions.Setter, log logger.Wrapper) error {
 	existingHooks, err := asgService.DescribeLifecycleHooks(asgName)
 	if err != nil {
 		return err
@@ -191,7 +191,7 @@ func (s *Service) ReconcileLifecycleHooks(ctx context.Context, asgService services.ASGInterfa
 		if !found {
 			log.Info("Deleting extraneous lifecycle hook", "hook", existingHook.Name)
 			if err := asgService.DeleteLifecycleHook(ctx, asgName, existingHook); err != nil {
-				conditions.MarkFalse(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition, expinfrav1.LifecycleHookDeletionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+				deprecatedv1beta1conditions.MarkFalse(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition, expinfrav1.LifecycleHookDeletionFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
 				return err
 			}
 		}
@@ -208,7 +208,7 @@ func lifecycleHookNeedsUpdate(existing *expinfrav1.AWSLifecycleHook, expected *e
 		existing.NotificationMetadata != expected.NotificationMetadata
 }
 
-func reconcileLifecycleHook(ctx context.Context, asgService services.ASGInterface, asgName string, wantedHook *expinfrav1.AWSLifecycleHook, existingHooks []*expinfrav1.AWSLifecycleHook, storeConditionsOnObject conditions.Setter, log logger.Wrapper) error {
+func reconcileLifecycleHook(ctx context.Context, asgService services.ASGInterface, asgName string, wantedHook *expinfrav1.AWSLifecycleHook, existingHooks []*expinfrav1.AWSLifecycleHook, storeConditionsOnObject deprecatedv1beta1conditions.Setter, log logger.Wrapper) error {
 	log = log.WithValues("hook", wantedHook.Name)
 	log.Info("Checking for existing lifecycle hook")
@@ -223,7 +223,7 @@ func reconcileLifecycleHook(ctx context.Context, asgService services.ASGInterfac
 	if existingHook == nil {
 		log.Info("Creating lifecycle hook")
 		if err := asgService.CreateLifecycleHook(ctx, asgName, wantedHook); err != nil {
-			conditions.MarkFalse(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition, expinfrav1.LifecycleHookCreationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+			deprecatedv1beta1conditions.MarkFalse(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition, expinfrav1.LifecycleHookCreationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
 			return err
 		}
 		return nil
@@ -232,11 +232,11 @@ func reconcileLifecycleHook(ctx context.Context, asgService services.ASGInterfac
 	if lifecycleHookNeedsUpdate(existingHook, wantedHook) {
 		log.Info("Updating lifecycle hook")
 		if err := asgService.UpdateLifecycleHook(ctx, asgName, wantedHook); err != nil {
-			conditions.MarkFalse(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition, expinfrav1.LifecycleHookUpdateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+			deprecatedv1beta1conditions.MarkFalse(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition, expinfrav1.LifecycleHookUpdateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
 			return err
 		}
 	}
 
-	conditions.MarkTrue(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition)
+	deprecatedv1beta1conditions.MarkTrue(storeConditionsOnObject, expinfrav1.LifecycleHookReadyCondition)
 	return nil
 }
diff --git a/pkg/cloud/services/ec2/bastion.go b/pkg/cloud/services/ec2/bastion.go
index 89a8e241da..441f5a4721 100644
--- a/pkg/cloud/services/ec2/bastion.go
+++ b/pkg/cloud/services/ec2/bastion.go
@@ -27,13 +27,14 @@ import (
 	"github.com/aws/aws-sdk-go-v2/service/ec2/types"
 	"github.com/pkg/errors"
 
+	infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1"
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 const (
@@ -72,8 +73,8 @@ func (s *Service) ReconcileBastion() error {
 	// Describe bastion instance, if any.
 	instance, err := s.describeBastionInstance()
 	if awserrors.IsNotFound(err) { //nolint:nestif
-		if !conditions.Has(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition) {
-			conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, infrav1.BastionCreationStartedReason, clusterv1.ConditionSeverityInfo, "")
+		if !conditions.Has(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition) {
+			conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, infrav1beta1.BastionCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "")
 			if err := s.scope.PatchObject(); err != nil {
 				return errors.Wrap(err, "failed to patch conditions")
 			}
@@ -98,7 +99,7 @@ func (s *Service) ReconcileBastion() error {
 	// TODO(vincepri): check for possible changes between the default spec and the instance.
 	s.scope.SetBastionInstance(instance.DeepCopy())
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition)
+	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition)
 	s.scope.Debug("Reconcile bastion completed successfully")
 
 	return nil
@@ -115,20 +116,20 @@ func (s *Service) DeleteBastion() error {
 		return errors.Wrap(err, "unable to describe bastion instance")
 	}
 
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "")
+	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	if err := s.scope.PatchObject(); err != nil {
 		return err
 	}
 
 	if err := s.TerminateInstanceAndWait(instance.ID); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error())
+		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		record.Warnf(s.scope.InfraCluster(), "FailedTerminateBastion", "Failed to terminate bastion instance %q: %v", instance.ID, err)
 		return errors.Wrap(err, "unable to delete bastion instance")
 	}
 
 	s.scope.SetBastionInstance(nil)
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1.BastionHostReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "")
+	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 	record.Eventf(s.scope.InfraCluster(), "SuccessfulTerminateBastion", "Terminated bastion instance %q", instance.ID)
 	s.scope.Info("Deleted bastion host", "id", instance.ID)
 
diff --git a/pkg/cloud/services/ec2/bastion_test.go b/pkg/cloud/services/ec2/bastion_test.go
index 9bb5d304fb..5fcc77d34d 100644
--- a/pkg/cloud/services/ec2/bastion_test.go
+++ b/pkg/cloud/services/ec2/bastion_test.go
@@ -34,7 +34,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 func TestServiceDeleteBastion(t *testing.T) {
diff --git a/pkg/cloud/services/ec2/helper_test.go b/pkg/cloud/services/ec2/helper_test.go
index 550e9d7eb4..889a82e23f 100644
--- a/pkg/cloud/services/ec2/helper_test.go
+++ b/pkg/cloud/services/ec2/helper_test.go
@@ -24,14 +24,13 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 func setupClusterScope(cl client.Client) (*scope.ClusterScope, error) {
@@ -175,7 +174,7 @@ func newMachinePool() *clusterv1.MachinePool {
 		Spec: clusterv1.MachinePoolSpec{
 			Template: clusterv1.MachineTemplateSpec{
 				Spec: clusterv1.MachineSpec{
-					Version: ptr.To[string]("v1.23.3"),
+					Version: "v1.23.3",
 				},
 			},
 		},
diff --git a/pkg/cloud/services/ec2/instances.go b/pkg/cloud/services/ec2/instances.go
index 5cfa6c55a7..f6d8fb8a0d 100644
--- a/pkg/cloud/services/ec2/instances.go
+++ b/pkg/cloud/services/ec2/instances.go
@@ -39,7 +39,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 // GetRunningInstanceByTags returns the existing instance or nothing if it doesn't exist.
@@ -144,7 +144,7 @@ func (s *Service) CreateInstance(ctx context.Context, scope *scope.MachineScope,
 	if scope.AWSMachine.Spec.AMI.ID != nil { //nolint:nestif
 		input.ImageID = *scope.AWSMachine.Spec.AMI.ID
 	} else {
-		if scope.Machine.Spec.Version == nil {
+		if scope.Machine.Spec.Version == "" {
 			err := errors.New("Either AWSMachine's spec.ami.id or Machine's spec.version must be defined")
 			scope.SetFailureReason("CreateError")
 			scope.SetFailureMessage(err)
@@ -167,12 +167,12 @@ func (s *Service) CreateInstance(ctx context.Context, scope *scope.MachineScope,
 		}
 
 		if scope.IsEKSManaged() && imageLookupFormat == "" && imageLookupOrg == "" && imageLookupBaseOS == "" {
-			input.ImageID, err = s.eksAMILookup(ctx, *scope.Machine.Spec.Version, imageArchitecture, scope.AWSMachine.Spec.AMI.EKSOptimizedLookupType)
+			input.ImageID, err = s.eksAMILookup(ctx, scope.Machine.Spec.Version, imageArchitecture, scope.AWSMachine.Spec.AMI.EKSOptimizedLookupType)
 			if err != nil {
 				return nil, err
 			}
 		} else {
-			input.ImageID, err = s.defaultAMIIDLookup(imageLookupFormat, imageLookupOrg, imageLookupBaseOS, imageArchitecture, *scope.Machine.Spec.Version)
+			input.ImageID, err = s.defaultAMIIDLookup(imageLookupFormat, imageLookupOrg, imageLookupBaseOS, imageArchitecture, scope.Machine.Spec.Version)
 			if err != nil {
 				return nil, err
 			}
@@ -355,11 +355,11 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) {
 		var filtered []types.Subnet
 		var errMessage string
 		for _, subnet := range subnets {
-			if failureDomain != nil && *subnet.AvailabilityZone != *failureDomain {
+			if *subnet.AvailabilityZone != failureDomain {
 				// we could have included the failure domain in the query criteria, but then we end up with EC2 error
 				// messages that don't give a good hint about what is really wrong
 				errMessage += fmt.Sprintf(" subnet %q availability zone %q does not match failure domain %q.",
-					*subnet.SubnetId, *subnet.AvailabilityZone, *failureDomain)
+					*subnet.SubnetId, *subnet.AvailabilityZone, failureDomain)
 				continue
 			}
 
@@ -395,22 +395,22 @@ func (s *Service) findSubnet(scope *scope.MachineScope) (string, error) {
 			return "", awserrors.NewFailedDependency(errMessage)
 		}
 		return *filtered[0].SubnetId, nil
-	case failureDomain != nil:
+	case failureDomain != "":
 		if scope.AWSMachine.Spec.PublicIP != nil && *scope.AWSMachine.Spec.PublicIP {
-			subnets := s.scope.Subnets().FilterPublic().FilterNonCni().FilterByZone(*failureDomain)
+			subnets := s.scope.Subnets().FilterPublic().FilterNonCni().FilterByZone(failureDomain)
 			if len(subnets) == 0 {
 				errMessage := fmt.Sprintf("failed to run machine %q with public IP, no public subnets available in availability zone %q",
-					scope.Name(), *failureDomain)
+					scope.Name(), failureDomain)
 				record.Warnf(scope.AWSMachine, "FailedCreate", errMessage)
 				return "", awserrors.NewFailedDependency(errMessage)
 			}
 			return subnets[0].GetResourceID(), nil
 		}
-		subnets := s.scope.Subnets().FilterPrivate().FilterNonCni().FilterByZone(*failureDomain)
+		subnets := s.scope.Subnets().FilterPrivate().FilterNonCni().FilterByZone(failureDomain)
 		if len(subnets) == 0 {
 			errMessage := fmt.Sprintf("failed to run machine %q, no subnets available in availability zone %q",
-				scope.Name(), *failureDomain)
+				scope.Name(), failureDomain)
 			record.Warnf(scope.AWSMachine, "FailedCreate", errMessage)
 			return "", awserrors.NewFailedDependency(errMessage)
 		}
diff --git a/pkg/cloud/services/ec2/instances_test.go b/pkg/cloud/services/ec2/instances_test.go
index e8c80df03e..8c37b8875a 100644
--- a/pkg/cloud/services/ec2/instances_test.go
+++ b/pkg/cloud/services/ec2/instances_test.go
@@ -43,7 +43,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 func TestInstanceIfExists(t *testing.T) {
@@ -435,7 +435,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					FailureDomain: aws.String("us-east-1c"),
+					FailureDomain: "us-east-1c",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -569,7 +569,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: aws.String("bootstrap-data"),
 					},
-					FailureDomain: aws.String("us-east-1c"),
+					FailureDomain: "us-east-1c",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -657,7 +657,8 @@ func TestCreateInstance(t *testing.T) {
 						Name:   aws.String("availability-zone"),
 						Values: []string{"us-east-1c"},
 					},
-				}})).Return(&ec2.DescribeSubnetsOutput{
+				},
+				})).Return(&ec2.DescribeSubnetsOutput{
 					Subnets: []types.Subnet{
 						{
 							VpcId: aws.String("vpc-incorrect-1"),
@@ -829,7 +830,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: aws.String("bootstrap-data"),
 					},
-					FailureDomain: aws.String("us-east-1c"),
+					FailureDomain: "us-east-1c",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -925,7 +926,8 @@ func TestCreateInstance(t *testing.T) {
 						Name:   aws.String("availability-zone"),
 						Values: []string{"us-east-1c"},
 					},
-				}})).Return(&ec2.DescribeSubnetsOutput{
+				},
+				})).Return(&ec2.DescribeSubnetsOutput{
 					Subnets: []types.Subnet{
 						{
 							VpcId: aws.String("vpc-bar"),
@@ -1082,7 +1084,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					Version: ptr.To[string]("v1.16.1"),
+					Version: "v1.16.1",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -1235,7 +1237,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					Version: ptr.To[string]("v1.16.1"),
+					Version: "v1.16.1",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -1388,7 +1390,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					Version: ptr.To[string]("v1.16.1"),
+					Version: "v1.16.1",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -1542,7 +1544,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					FailureDomain: aws.String("us-east-1b"),
+					FailureDomain: "us-east-1b",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -2012,7 +2014,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					FailureDomain: aws.String("us-east-1b"),
+					FailureDomain: "us-east-1b",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -2109,7 +2111,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					FailureDomain: aws.String("us-east-1b"),
+					FailureDomain: "us-east-1b",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -4381,7 +4383,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					Version: ptr.To[string]("v1.16.1"),
+					Version: "v1.16.1",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -4513,7 +4515,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					Version: ptr.To[string]("v1.16.1"),
+					Version: "v1.16.1",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -4646,7 +4648,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					Version: ptr.To[string]("v1.16.1"),
+					Version: "v1.16.1",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -4780,7 +4782,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					Version: ptr.To[string]("v1.16.1"),
+					Version: "v1.16.1",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -4911,7 +4913,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					Version: ptr.To[string]("v1.16.1"),
+					Version: "v1.16.1",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -5042,7 +5044,7 @@ func TestCreateInstance(t *testing.T) {
 					Bootstrap: clusterv1.Bootstrap{
 						DataSecretName: ptr.To[string]("bootstrap-data"),
 					},
-					Version: ptr.To[string]("v1.16.1"),
+					Version: "v1.16.1",
 				},
 			},
 			machineConfig: &infrav1.AWSMachineSpec{
@@ -5352,7 +5354,6 @@ func TestCreateInstance(t *testing.T) {
 			awsCluster: &infrav1.AWSCluster{
 				ObjectMeta: metav1.ObjectMeta{Name: "test"},
 				Spec: infrav1.AWSClusterSpec{
-
 					NetworkSpec: infrav1.NetworkSpec{
 						Subnets: infrav1.Subnets{
 							infrav1.SubnetSpec{
@@ -5557,7 +5558,6 @@ func TestCreateInstance(t *testing.T) {
 			awsCluster: &infrav1.AWSCluster{
 				ObjectMeta: metav1.ObjectMeta{Name: "test"},
 				Spec: infrav1.AWSClusterSpec{
-
 					NetworkSpec: infrav1.NetworkSpec{
 						Subnets: infrav1.Subnets{
 							infrav1.SubnetSpec{
@@ -5678,7 +5678,6 @@ func TestCreateInstance(t *testing.T) {
 			awsCluster: &infrav1.AWSCluster{
 				ObjectMeta: metav1.ObjectMeta{Name: "test"},
 				Spec: infrav1.AWSClusterSpec{
-
 					NetworkSpec: infrav1.NetworkSpec{
 						Subnets: infrav1.Subnets{
 							infrav1.SubnetSpec{
@@ -6171,12 +6170,12 @@ func TestCreateInstance(t *testing.T) {
 					Name: "test1",
 				},
 				Spec: clusterv1.ClusterSpec{
-					ClusterNetwork: &clusterv1.ClusterNetwork{
+					ClusterNetwork: clusterv1.ClusterNetwork{
 						ServiceDomain: "cluster.local",
-						Services: &clusterv1.NetworkRanges{
+						Services: clusterv1.NetworkRanges{
 							CIDRBlocks: []string{"192.168.0.0/16"},
 						},
-						Pods: &clusterv1.NetworkRanges{
+						Pods: clusterv1.NetworkRanges{
 							CIDRBlocks: []string{"192.168.0.0/16"},
 						},
 					},
diff --git a/pkg/cloud/services/ec2/launchtemplate.go b/pkg/cloud/services/ec2/launchtemplate.go
index f08c9e2eea..1ba96aab34 100644
--- a/pkg/cloud/services/ec2/launchtemplate.go
+++ b/pkg/cloud/services/ec2/launchtemplate.go
@@ -40,6 +40,7 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+	expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/feature"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
@@ -48,8 +49,8 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 const (
@@ -85,24 +86,24 @@ func (s *Service) ReconcileLaunchTemplate(
 	scope.Info("checking for existing launch template")
 	launchTemplate, launchTemplateUserDataHash, launchTemplateUserDataSecretKey, _, err := ec2svc.GetLaunchTemplate(scope.LaunchTemplateName())
 	if err != nil {
-		conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error())
+		conditions.MarkUnknown(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateNotFoundReason, "%s", err.Error())
 		return nil, err
 	}
 
 	imageID, err := ec2svc.DiscoverLaunchTemplateAMI(ctx, scope)
 	if err != nil {
-		conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+		conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateCreateFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return nil, err
 	}
 
-	var ignitionStorageType = infrav1.DefaultMachinePoolIgnitionStorageType
+	ignitionStorageType := infrav1.DefaultMachinePoolIgnitionStorageType
 	if ignition := ignitionScope.Ignition(); ignition != nil {
 		ignitionStorageType = ignition.StorageType
 	}
 
 	var userDataForLaunchTemplate []byte
 	if bootstrapDataFormat == "ignition" && ignitionStorageType == infrav1.IgnitionStorageTypeOptionClusterObjectStore {
-		var ignitionVersion = infrav1.DefaultIgnitionVersion
+		ignitionVersion := infrav1.DefaultIgnitionVersion
 		if ignition := ignitionScope.Ignition(); ignition != nil {
 			ignitionVersion = ignition.Version
 		}
@@ -120,16 +121,15 @@ func (s *Service) ReconcileLaunchTemplate(
 		// Previously, user data was always written into the launch template, so we check
 		// `AWSMachinePool.Spec.Ignition != nil` to toggle the S3 feature on for `AWSMachinePool` objects.
 		objectURL, err := objectStoreSvc.CreateForMachinePool(ctx, scope, bootstrapData)
 		if err != nil {
-			conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+			conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 			return nil, err
 		}
 
 		semver, err := semver.ParseTolerant(ignitionVersion)
 		if err != nil {
 			err = errors.Wrapf(err, "failed to parse ignition version %q", ignitionVersion)
-			conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+			conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 			return nil, err
 		}
 
@@ -152,7 +152,7 @@ func (s *Service) ReconcileLaunchTemplate(
 			userDataForLaunchTemplate, err = json.Marshal(ignData)
 			if err != nil {
 				err = errors.Wrap(err, "failed to convert ignition config to JSON")
-				conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+				conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 				return nil, err
 			}
 		case 3:
@@ -172,12 +172,12 @@ func (s *Service) ReconcileLaunchTemplate(
 			userDataForLaunchTemplate, err = json.Marshal(ignData)
 			if err != nil {
 				err = errors.Wrap(err, "failed to convert ignition config to JSON")
-				conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+				conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 				return nil, err
 			}
 		default:
 			err = errors.Errorf("unsupported ignition version %q", ignitionVersion)
-			conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateReconcileFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+			conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 			return nil, err
 		}
 	} else {
@@ -192,7 +192,7 @@ func (s *Service) ReconcileLaunchTemplate(
 		scope.Info("no existing launch template found, creating")
 		launchTemplateID, err := ec2svc.CreateLaunchTemplate(scope, imageID, *bootstrapDataSecretKey, userDataForLaunchTemplate, userdata.ComputeHash(bootstrapData))
 		if err != nil {
-			conditions.MarkFalse(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateCreateFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+			conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateCreateFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 			return nil, err
 		}
 
@@ -205,7 +205,7 @@ func (s *Service) ReconcileLaunchTemplate(
 	if scope.GetLaunchTemplateIDStatus() == "" {
 		launchTemplateID, err := ec2svc.GetLaunchTemplateID(scope.LaunchTemplateName())
 		if err != nil {
-			conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error())
+			conditions.MarkUnknown(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateNotFoundReason, "%s", err.Error())
 			return nil, err
 		}
 		scope.SetLaunchTemplateIDStatus(launchTemplateID)
@@ -217,7 +217,7 @@ func (s *Service) ReconcileLaunchTemplate(
 	if scope.GetLaunchTemplateLatestVersionStatus() == "" {
 		launchTemplateVersion, err := ec2svc.GetLaunchTemplateLatestVersion(scope.GetLaunchTemplateIDStatus())
 		if err != nil {
-			conditions.MarkUnknown(scope.GetSetter(), expinfrav1.LaunchTemplateReadyCondition, expinfrav1.LaunchTemplateNotFoundReason, "%s", err.Error())
+			conditions.MarkUnknown(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateNotFoundReason, "%s", err.Error())
 			return nil, err
 		}
 		scope.SetLaunchTemplateLatestVersionStatus(launchTemplateVersion)
@@ -303,7 +303,6 @@ func (s *Service) ReconcileLaunchTemplate(
 			scope.Info("Deleting S3 object for deleted launch template version", "version", *deletedLaunchTemplateVersion.VersionNumber)
 
 			err = objectStoreSvc.DeleteForMachinePool(ctx, scope, *deletedLaunchTemplateVersionBootstrapDataHash)
-			// If any error happened above, log it and continue
 			if err != nil {
 				scope.Error(err, "Failed to delete S3 object for deleted launch template version, continuing because the bucket lifecycle policy will clean it later", "version", *deletedLaunchTemplateVersion.VersionNumber)
@@ -327,10 +326,10 @@ func (s *Service) ReconcileLaunchTemplate(
 
 	if needsUpdate || tagsChanged || amiChanged || userDataSecretKeyChanged {
 		if err := runPostLaunchTemplateUpdateOperation(); err != nil {
-			conditions.MarkFalse(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition, expinfrav1.PostLaunchTemplateUpdateOperationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+			conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.PostLaunchTemplateUpdateOperationCondition, expinfrav1beta1.PostLaunchTemplateUpdateOperationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 			return nil, err
 		}
-		conditions.MarkTrue(scope.GetSetter(), expinfrav1.PostLaunchTemplateUpdateOperationCondition)
+		conditions.MarkTrue(scope.GetSetter(), expinfrav1beta1.PostLaunchTemplateUpdateOperationCondition)
 	}
 
 	return nil, nil
@@ -1064,7 +1063,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(ctx context.Context, scope scope.Lau
 	}
 
 	templateVersion := scope.GetMachinePool().Spec.Template.Spec.Version
-	if templateVersion == nil {
+	if templateVersion == "" {
 		err := errors.New("Either AWSMachinePool's spec.awslaunchtemplate.ami.id or MachinePool's spec.template.spec.version must be defined")
 		s.scope.Error(err, "")
 		return nil, err
@@ -1105,7 +1104,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(ctx context.Context, scope scope.Lau
 	if scope.IsEKSManaged() && imageLookupFormat == "" && imageLookupOrg == "" && imageLookupBaseOS == "" {
 		lookupAMI, err = s.eksAMILookup(
 			ctx,
-			*templateVersion,
+			templateVersion,
 			imageArchitecture,
 			scope.GetLaunchTemplate().AMI.EKSOptimizedLookupType,
 		)
@@ -1118,7 +1117,7 @@ func (s *Service) DiscoverLaunchTemplateAMI(ctx context.Context, scope scope.Lau
 			imageLookupOrg,
 			imageLookupBaseOS,
 			imageArchitecture,
-			*templateVersion,
+			templateVersion,
 		)
 		if err != nil {
 			return nil, err
diff --git a/pkg/cloud/services/ec2/launchtemplate_test.go b/pkg/cloud/services/ec2/launchtemplate_test.go
index fff8074586..cb3b53ad77 100644
--- a/pkg/cloud/services/ec2/launchtemplate_test.go
+++ b/pkg/cloud/services/ec2/launchtemplate_test.go
@@ -43,7 +43,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 const (
@@ -82,8 +82,10 @@ users:
 
 var testUserDataHash = userdata.ComputeHash([]byte(testUserData))
 
-var testBootstrapData = []byte("different from testUserData since bootstrap data may be in S3 while EC2 user data points to that S3 object")
-var testBootstrapDataHash = userdata.ComputeHash(testBootstrapData)
+var (
+	testBootstrapData     = []byte("different from testUserData since bootstrap data may be in S3 while EC2 user data points to that S3 object")
+	testBootstrapDataHash = userdata.ComputeHash(testBootstrapData)
+)
 
 func defaultEC2AndDataTags(name string, clusterName string, userDataSecretKey types.NamespacedName, bootstrapDataHash string) []ec2types.Tag {
 	tags := defaultEC2Tags(name, clusterName)
@@ -1800,7 +1802,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
 			},
 			machineTemplate: clusterv1.MachineTemplateSpec{
 				Spec: clusterv1.MachineSpec{
-					Version: aws.String(DefaultAmiNameFormat),
+					Version: DefaultAmiNameFormat,
 				},
 			},
 			expect: func(m *mocks.MockEC2APIMockRecorder) {
@@ -1851,7 +1853,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
 			},
 			machineTemplate: clusterv1.MachineTemplateSpec{
 				Spec: clusterv1.MachineSpec{
-					Version: aws.String(DefaultAmiNameFormat),
+					Version: DefaultAmiNameFormat,
 				},
 			},
 			expect: func(m *mocks.MockEC2APIMockRecorder) {
@@ -1902,7 +1904,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
 			},
 			machineTemplate: clusterv1.MachineTemplateSpec{
 				Spec: clusterv1.MachineSpec{
-					Version: aws.String(DefaultAmiNameFormat),
+					Version: DefaultAmiNameFormat,
 				},
 			},
 			expect: func(m *mocks.MockEC2APIMockRecorder) {
@@ -1974,7 +1976,7 @@ func TestDiscoverLaunchTemplateAMI(t *testing.T) {
 			},
 			machineTemplate: clusterv1.MachineTemplateSpec{
 				Spec: clusterv1.MachineSpec{
-					Version: aws.String(DefaultAmiNameFormat),
+					Version: DefaultAmiNameFormat,
 				},
 			},
 			expect: func(m *mocks.MockEC2APIMockRecorder) {
diff --git a/pkg/cloud/services/eks/cluster.go b/pkg/cloud/services/eks/cluster.go
index 20cf81b541..5980fbe485 100644
--- a/pkg/cloud/services/eks/cluster.go
+++ b/pkg/cloud/services/eks/cluster.go
@@ -34,6 +34,7 @@ import (
 	"k8s.io/utils/ptr"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
+	ekscontrolplanev1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta1"
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
@@ -42,8 +43,9 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cmp"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/tristate"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 func (s *Service) reconcileCluster(ctx context.Context) error {
@@ -207,12 +209,12 @@ func (s *Service) setStatus(cluster *ekstypes.Cluster) error {
 	case ekstypes.ClusterStatusActive:
 		s.scope.ControlPlane.Status.Ready = true
 		s.scope.ControlPlane.Status.FailureMessage = nil
-		if conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition) {
+		if conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneCreatingCondition) {
 			record.Eventf(s.scope.ControlPlane, "SuccessfulCreateEKSControlPlane", "Created new EKS control plane %s", s.scope.KubernetesClusterName())
-			conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition, "created", clusterv1.ConditionSeverityInfo, "")
+			conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneCreatingCondition, "created", clusterv1beta1.ConditionSeverityInfo, "")
 		}
-		if conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition) {
-			conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition, "updated", clusterv1.ConditionSeverityInfo, "")
+		if conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) {
+			conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition, "updated", clusterv1beta1.ConditionSeverityInfo, "")
 			record.Eventf(s.scope.ControlPlane, "SuccessfulUpdateEKSControlPlane", "Updated EKS control plane %s", s.scope.KubernetesClusterName())
 		}
 		if s.scope.ControlPlane.Spec.UpgradePolicy == ekscontrolplanev1.UpgradePolicyStandard &&
@@ -523,7 +525,7 @@ func (s *Service) createCluster(ctx context.Context, eksClusterName string) (*ek
 		if out, err = s.EKSClient.CreateCluster(ctx, input); err != nil {
 			return false, err
 		}
-		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneCreatingCondition)
+		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneCreatingCondition)
 		record.Eventf(s.scope.ControlPlane, "InitiatedCreateEKSControlPlane", "Initiated creation of a new EKS control plane %s", s.scope.KubernetesClusterName())
 		return true, nil
 	}, awserrors.ResourceNotFound); err != nil { // TODO: change the error that can be retried
@@ -581,7 +583,7 @@ func (s *Service) reconcileClusterConfig(ctx context.Context, cluster *ekstypes.
 		if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil {
 			return false, err
 		}
-		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition)
+		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition)
 		record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated update of a new EKS control plane %s", s.scope.KubernetesClusterName())
 		return true, nil
 	}); err != nil {
@@ -624,7 +626,7 @@ func (s *Service) reconcileAccessConfig(ctx context.Context, accessConfig *eksty
 			return false, err
 		}
 
-		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition)
+		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition)
 		record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated auth config update for EKS control plane %s", s.scope.KubernetesClusterName())
 		return true, nil
 	}); err != nil {
@@ -653,7 +655,7 @@ func (s *Service) reconcileLogging(ctx context.Context, logging *ekstypes.Loggin
 		if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil {
 			return false, err
 		}
-		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition)
+		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition)
 		record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated logging update for EKS control plane %s", s.scope.KubernetesClusterName())
 		return true, nil
 	}); err != nil {
@@ -801,7 +803,7 @@ func (s *Service) reconcileClusterVersion(ctx context.Context, cluster *ekstypes
 			return false, err
 		}
 
-		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition)
+		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition)
 		record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated update of EKS control plane %s to version %s", s.scope.KubernetesClusterName(), nextVersionString)
 
 		return true, nil
@@ -872,7 +874,7 @@ func (s *Service) updateEncryptionConfig(ctx context.Context, updatedEncryptionC
 			return false, err
 		}
 
-		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneUpdatingCondition)
+		conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition)
 		record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEncryptionConfig", "Initiated update of encryption config in EKS control plane %s", s.scope.KubernetesClusterName())
 
 		return true, nil
diff --git a/pkg/cloud/services/eks/cluster_test.go b/pkg/cloud/services/eks/cluster_test.go
index 91972c9fa4..f51ee4c340 100644
--- a/pkg/cloud/services/eks/cluster_test.go
+++ b/pkg/cloud/services/eks/cluster_test.go
@@ -39,7 +39,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/mock_eksiface"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 )
 
 func TestMakeEKSEncryptionConfigs(t *testing.T) {
diff --git a/pkg/cloud/services/eks/config.go b/pkg/cloud/services/eks/config.go
index ba11e4bcd7..0ac413f729 100644
--- a/pkg/cloud/services/eks/config.go
+++ b/pkg/cloud/services/eks/config.go
@@ -35,7 +35,7 @@ import (
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/kubeconfig" "sigs.k8s.io/cluster-api/util/secret" diff --git a/pkg/cloud/services/eks/config_test.go b/pkg/cloud/services/eks/config_test.go index f1d0d2d299..c9c3334a14 100644 --- a/pkg/cloud/services/eks/config_test.go +++ b/pkg/cloud/services/eks/config_test.go @@ -20,7 +20,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/secret" ) diff --git a/pkg/cloud/services/eks/eks.go b/pkg/cloud/services/eks/eks.go index b0a4811d8d..aed1575ac7 100644 --- a/pkg/cloud/services/eks/eks.go +++ b/pkg/cloud/services/eks/eks.go @@ -23,12 +23,12 @@ import ( "github.com/pkg/errors" "k8s.io/klog/v2" - ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" + ekscontrolplanev1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta1" + expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // ReconcileControlPlane reconciles a EKS control plane. 
@@ -37,31 +37,31 @@ func (s *Service) ReconcileControlPlane(ctx context.Context) error {
 
 	// Control Plane IAM Role
 	if err := s.reconcileControlPlaneIAMRole(ctx); err != nil {
-		conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition, ekscontrolplanev1.IAMControlPlaneRolesReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+		conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.IAMControlPlaneRolesReadyCondition, ekscontrolplanev1beta1.IAMControlPlaneRolesReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.IAMControlPlaneRolesReadyCondition)
+	conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.IAMControlPlaneRolesReadyCondition)
 
 	// EKS Cluster
 	if err := s.reconcileCluster(ctx); err != nil {
-		conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition, ekscontrolplanev1.EKSControlPlaneReconciliationFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+		conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneReadyCondition, ekscontrolplanev1beta1.EKSControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSControlPlaneReadyCondition)
+	conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneReadyCondition)
 
 	// EKS Addons
 	if err := s.reconcileAddons(ctx); err != nil {
-		conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSAddonsConfiguredCondition, ekscontrolplanev1.EKSAddonsConfiguredFailedReason, clusterv1.ConditionSeverityError, "%s", err.Error())
+		conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSAddonsConfiguredCondition, ekscontrolplanev1beta1.EKSAddonsConfiguredFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return errors.Wrap(err, "failed reconciling eks addons")
 	}
-	conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSAddonsConfiguredCondition)
+	conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSAddonsConfiguredCondition)
 
 	// EKS Identity Provider
 	if err := s.reconcileIdentityProvider(ctx); err != nil {
-		conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1.EKSIdentityProviderConfiguredCondition, ekscontrolplanev1.EKSIdentityProviderConfiguredFailedReason, clusterv1.ConditionSeverityWarning, "%s", err.Error())
+		conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSIdentityProviderConfiguredCondition, ekscontrolplanev1beta1.EKSIdentityProviderConfiguredFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return errors.Wrap(err, "failed reconciling eks identity provider")
 	}
-	conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1.EKSIdentityProviderConfiguredCondition)
+	conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSIdentityProviderConfiguredCondition)
 
 	s.scope.Debug("Reconcile EKS control plane completed successfully")
 	return nil
@@ -97,28 +97,28 @@ func (s *NodegroupService) ReconcilePool(ctx context.Context) error {
 	if err := s.reconcileNodegroupIAMRole(ctx); err != nil {
 		conditions.MarkFalse(
 			s.scope.ManagedMachinePool,
-			expinfrav1.IAMNodegroupRolesReadyCondition,
-			expinfrav1.IAMNodegroupRolesReconciliationFailedReason,
-			clusterv1.ConditionSeverityError,
+			expinfrav1beta1.IAMNodegroupRolesReadyCondition,
+			expinfrav1beta1.IAMNodegroupRolesReconciliationFailedReason,
+			clusterv1beta1.ConditionSeverityError,
 			"%s",
 			err.Error(),
 		)
 		return err
 	}
-	conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1.IAMNodegroupRolesReadyCondition)
+	conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1beta1.IAMNodegroupRolesReadyCondition)
 
 	if err := s.reconcileNodegroup(ctx); err != nil {
 		conditions.MarkFalse(
 			s.scope.ManagedMachinePool,
-			expinfrav1.EKSNodegroupReadyCondition,
-			expinfrav1.EKSNodegroupReconciliationFailedReason,
-			clusterv1.ConditionSeverityError,
+			expinfrav1beta1.EKSNodegroupReadyCondition,
+			expinfrav1beta1.EKSNodegroupReconciliationFailedReason,
+			clusterv1beta1.ConditionSeverityError,
 			"%s",
 			err.Error(),
 		)
 		return err
 	}
-	conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1.EKSNodegroupReadyCondition)
+	conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1beta1.EKSNodegroupReadyCondition)
 
 	return nil
 }
diff --git a/pkg/cloud/services/eks/fargate.go b/pkg/cloud/services/eks/fargate.go
index 2054e8eec5..5da499d6c5 100644
--- a/pkg/cloud/services/eks/fargate.go
+++ b/pkg/cloud/services/eks/fargate.go
@@ -29,11 +29,12 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/reconcile"
 
 	infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2"
-	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
+	expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/conditions"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 func requeueProfileUpdating() reconcile.Result {
@@ -52,9 +53,9 @@ func (s *FargateService) Reconcile(ctx context.Context) (reconcile.Result, error
 	if err != nil {
 		conditions.MarkFalse(
 			s.scope.FargateProfile,
-			expinfrav1.IAMFargateRolesReadyCondition,
-			expinfrav1.IAMFargateRolesReconciliationFailedReason,
-			clusterv1.ConditionSeverityError,
+			expinfrav1beta1.IAMFargateRolesReadyCondition,
+			expinfrav1beta1.IAMFargateRolesReconciliationFailedReason,
+			clusterv1beta1.ConditionSeverityError,
 			"%s",
 			err.Error(),
 		)
@@ -66,15 +67,15 @@ func (s *FargateService) Reconcile(ctx context.Context) (reconcile.Result, error
 		return requeueRoleUpdating(), nil
 	}
 
-	conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.IAMFargateRolesReadyCondition)
+	conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.IAMFargateRolesReadyCondition)
 
 	requeue, err = s.reconcileFargateProfile(ctx)
 	if err != nil {
 		conditions.MarkFalse(
 			s.scope.FargateProfile,
 			clusterv1.ReadyCondition,
-			expinfrav1.EKSFargateReconciliationFailedReason,
-			clusterv1.ConditionSeverityError,
+			expinfrav1beta1.EKSFargateReconciliationFailedReason,
+			clusterv1beta1.ConditionSeverityError,
 			"%s",
 			err.Error(),
 		)
@@ -124,34 +125,34 @@ func (s *FargateService) handleStatus(profile *ekstypes.FargateProfile) (requeue
 	switch profile.Status {
 	case ekstypes.FargateProfileStatusCreating:
 		s.scope.FargateProfile.Status.Ready = false
-		if conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) {
-			conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition, expinfrav1.EKSFargateCreatingReason, clusterv1.ConditionSeverityInfo, "")
+		if conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) {
+			conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition, expinfrav1beta1.EKSFargateCreatingReason, clusterv1beta1.ConditionSeverityInfo, "")
 		}
-		if !conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition) {
+		if !conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition) {
 			record.Eventf(s.scope.FargateProfile, "InitiatedCreateEKSFargateProfile", "Started creating EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName)
-			conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition)
+			conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition)
 		}
-		conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateCreatingReason, clusterv1.ConditionSeverityInfo, "")
+		conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateCreatingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	case ekstypes.FargateProfileStatusCreateFailed, ekstypes.FargateProfileStatusDeleteFailed:
 		s.scope.FargateProfile.Status.Ready = false
 		s.scope.FargateProfile.Status.FailureMessage = aws.String(fmt.Sprintf("unexpected profile status: %s", string(profile.Status)))
-		reason := expinfrav1.EKSFargateFailedReason
+		reason := expinfrav1beta1.EKSFargateFailedReason
 		s.scope.FargateProfile.Status.FailureReason = &reason
-		conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateFailedReason, clusterv1.ConditionSeverityError, "")
+		conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateFailedReason, clusterv1beta1.ConditionSeverityError, "")
 	case ekstypes.FargateProfileStatusActive:
 		s.scope.FargateProfile.Status.Ready = true
-		if conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition) {
+		if conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition) {
 			record.Eventf(s.scope.FargateProfile, "SuccessfulCreateEKSFargateProfile", "Created new EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName)
-			conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateCreatingCondition, expinfrav1.EKSFargateCreatedReason, clusterv1.ConditionSeverityInfo, "")
+			conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition, expinfrav1beta1.EKSFargateCreatedReason, clusterv1beta1.ConditionSeverityInfo, "")
 		}
-		conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition)
+		conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition)
 	case ekstypes.FargateProfileStatusDeleting:
 		s.scope.FargateProfile.Status.Ready = false
-		if !conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) {
+		if !conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) {
 			record.Eventf(s.scope.FargateProfile, "InitiatedDeleteEKSFargateProfile", "Started deleting EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName)
-			conditions.MarkTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition)
+			conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition)
 		}
-		conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateDeletingReason, clusterv1.ConditionSeverityInfo, "")
+		conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateDeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	}
 
 	switch profile.Status {
 	case ekstypes.FargateProfileStatusCreating, ekstypes.FargateProfileStatusDeleting:
@@ -170,8 +171,8 @@ func (s *FargateService) ReconcileDelete(ctx context.Context) (reconcile.Result,
 		conditions.MarkFalse(
 			s.scope.FargateProfile,
 			clusterv1.ReadyCondition,
-			expinfrav1.EKSFargateReconciliationFailedReason,
-			clusterv1.ConditionSeverityError,
+			expinfrav1beta1.EKSFargateReconciliationFailedReason,
+			clusterv1beta1.ConditionSeverityError,
 			"%s",
 			err.Error(),
 		)
@@ -186,9 +187,9 @@ func (s *FargateService) ReconcileDelete(ctx context.Context) (reconcile.Result,
 	if err != nil {
 		conditions.MarkFalse(
 			s.scope.FargateProfile,
-			expinfrav1.IAMFargateRolesReadyCondition,
-			expinfrav1.IAMFargateRolesReconciliationFailedReason,
-			clusterv1.ConditionSeverityError,
+			expinfrav1beta1.IAMFargateRolesReadyCondition,
+			expinfrav1beta1.IAMFargateRolesReconciliationFailedReason,
+			clusterv1beta1.ConditionSeverityError,
 			"%s",
 			err.Error(),
 		)
@@ -272,11 +273,11 @@ func (s *FargateService) deleteFargateProfile(ctx context.Context) (requeue bool
 		return false, errors.Wrap(err, "failed to describe profile")
 	}
 	if profile == nil {
-		if conditions.IsTrue(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition) {
+		if conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) {
 			record.Eventf(s.scope.FargateProfile, "SuccessfulDeleteEKSFargateProfile", "Deleted EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName)
-			conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateDeletingCondition, expinfrav1.EKSFargateDeletedReason, clusterv1.ConditionSeverityInfo, "")
+			conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition, expinfrav1beta1.EKSFargateDeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 		}
-		conditions.MarkFalse(s.scope.FargateProfile, expinfrav1.EKSFargateProfileReadyCondition, expinfrav1.EKSFargateDeletedReason, clusterv1.ConditionSeverityInfo, "")
+		conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateDeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return false, nil
 	}
 
diff --git a/pkg/cloud/services/eks/nodegroup.go b/pkg/cloud/services/eks/nodegroup.go
index 708ccaf6b1..6d9f5553d9 100644
--- a/pkg/cloud/services/eks/nodegroup.go
+++ b/pkg/cloud/services/eks/nodegroup.go
@@ -38,7 +38,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	"sigs.k8s.io/cluster-api/util/annotations"
 )
 
@@ -266,7 +266,7 @@ func (s *NodegroupService) createNodegroup(ctx context.Context) (*ekstypes.Nodeg
 func (s *NodegroupService) deleteNodegroupAndWait(ctx context.Context) (reterr error) {
 	eksClusterName := s.scope.KubernetesClusterName()
 	nodegroupName := s.scope.NodegroupName()
-	if err := s.scope.NodegroupReadyFalse(clusterv1.DeletingReason, ""); err != nil {
+	if err := s.scope.NodegroupReadyFalse(clusterv1beta1.DeletingReason, ""); err != nil {
 		return err
 	}
 	defer func() {
@@ -277,7 +277,7 @@ func (s *NodegroupService) deleteNodegroupAndWait(ctx context.Context) (reterr e
 	if err :=
s.scope.NodegroupReadyFalse("DeletingFailed", reterr.Error()); err != nil { reterr = err } - } else if err := s.scope.NodegroupReadyFalse(clusterv1.DeletedReason, ""); err != nil { + } else if err := s.scope.NodegroupReadyFalse(clusterv1beta1.DeletedReason, ""); err != nil { reterr = err } }() diff --git a/pkg/cloud/services/eks/oidc_test.go b/pkg/cloud/services/eks/oidc_test.go index 5f92151030..f54b33c67e 100644 --- a/pkg/cloud/services/eks/oidc_test.go +++ b/pkg/cloud/services/eks/oidc_test.go @@ -43,7 +43,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/iamauth/mock_iamauth" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/testcert" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" +) func TestOIDCReconcile(t *testing.T) { testCertThumbprint := getTestcertTumbprint(t) diff --git a/pkg/cloud/services/eks/roles.go b/pkg/cloud/services/eks/roles.go index 0e32c36359..bd9150a3c4 100644 --- a/pkg/cloud/services/eks/roles.go +++ b/pkg/cloud/services/eks/roles.go @@ -31,7 +31,8 @@ import ( eksiam "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/eks/iam" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/eks" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1") + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" +) const ( maxIAMRoleNameLength = 64 @@ -243,7 +244,7 @@ func (s *NodegroupService) reconcileNodegroupIAMRole(ctx context.Context) error } func (s *NodegroupService) deleteNodegroupIAMRole(ctx context.Context) (reterr error) { - if err := s.scope.IAMReadyFalse(clusterv1.DeletingReason, ""); err != nil { + if err := s.scope.IAMReadyFalse(clusterv1beta1.DeletingReason, ""); err != nil { return err } defer func() { @@ -254,7 +255,7 @@ func (s *NodegroupService) deleteNodegroupIAMRole(ctx context.Context) (reterr e if err := s.scope.IAMReadyFalse("DeletingFailed", reterr.Error()); err != nil { reterr = err } - } else if err := s.scope.IAMReadyFalse(clusterv1.DeletedReason, ""); err != nil { + } else if err := s.scope.IAMReadyFalse(clusterv1beta1.DeletedReason, ""); err != nil { reterr = err } }() @@ -355,7 +356,7 @@ func (s *FargateService) reconcileFargateIAMRole(ctx context.Context) (requeue b } func (s *FargateService) deleteFargateIAMRole(ctx context.Context) (reterr error) { - if err := s.scope.IAMReadyFalse(clusterv1.DeletingReason, ""); err != nil { + if err := s.scope.IAMReadyFalse(clusterv1beta1.DeletingReason, ""); err != nil { return err } defer func() { @@ -366,7 +367,7 @@ func (s *FargateService) deleteFargateIAMRole(ctx context.Context) (reterr error if err := s.scope.IAMReadyFalse("DeletingFailed", reterr.Error()); err != nil { reterr = err } - } else if err := s.scope.IAMReadyFalse(clusterv1.DeletedReason, ""); err != nil { + } else if err := s.scope.IAMReadyFalse(clusterv1beta1.DeletedReason, ""); err != nil { reterr = err } }() diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index 6a81816094..258b673c71 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -39,6 +39,7 @@ import ( "k8s.io/apiserver/pkg/storage/names" "k8s.io/utils/ptr" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" 
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" @@ -46,8 +47,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/hash" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // ResourceGroups are filtered by ARN identifier: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-syntax @@ -682,7 +683,7 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { return errors.Wrap(err, "failed to get control plane load balancer name") } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } @@ -690,7 +691,7 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { apiELB, err := s.describeClassicELB(ctx, elbName) if IsNotFound(err) { s.scope.Debug("Control plane load balancer not found, skipping deletion") - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } if err != nil { @@ -699,13 +700,13 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { if apiELB.IsUnmanaged(s.scope.Name()) { s.scope.Debug("Found unmanaged classic load balancer for apiserver, skipping deletion", "api-server-elb-name", apiELB.Name) - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } s.scope.Debug("deleting load balancer", "name", elbName) if err := s.deleteClassicELB(ctx, elbName); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -717,7 +718,7 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { return errors.Wrapf(err, "failed to wait for %q load balancer deletion", s.scope.Name()) } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") s.scope.Info("Deleted control plane load balancer", "name", elbName) return nil } @@ -792,7 +793,7 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad if err != nil { return errors.Wrap(err, "failed to get control plane load balancer name") } - conditions.MarkFalse(s.scope.InfraCluster(), 
infrav1.LoadBalancerReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } @@ -811,7 +812,7 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad } s.scope.Debug("deleting load balancer", "name", name) if err := s.deleteLB(ctx, lb.ARN); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -823,7 +824,7 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad return errors.Wrapf(err, "failed to wait for %q load balancer deletion", s.scope.Name()) } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.LoadBalancerReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") s.scope.Info("Deleted control plane load balancer", "name", name) return nil diff --git a/pkg/cloud/services/elb/loadbalancer_test.go b/pkg/cloud/services/elb/loadbalancer_test.go index 66cf054ff5..c29096cdeb 100644 --- a/pkg/cloud/services/elb/loadbalancer_test.go +++ b/pkg/cloud/services/elb/loadbalancer_test.go @@ -42,12 +42,14 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/client/fake" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) var stubInfraV1TargetGroupSpecAPI = infrav1.TargetGroupSpec{ @@ -2964,12 +2966,12 @@ func TestDeleteAPIServerELB(t *testing.T) { }).Return(nil, &elbtypes.AccessPointNotFoundException{}) }, verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) { - loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition) if loadBalancerConditionReady { t.Fatalf("Expected LoadBalancerReady condition to be False, but was True") } - loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) - if loadBalancerConditionReason != clusterv1.DeletedReason { + loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1beta1.LoadBalancerReadyCondition) + if loadBalancerConditionReason != clusterv1beta1.DeletedReason { t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason) } }, @@ -3013,12 +3015,12 @@ func TestDeleteAPIServerELB(t *testing.T) { ) }, verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) { - loadBalancerConditionReady := conditions.IsTrue(awsCluster, 
infrav1.LoadBalancerReadyCondition) + loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition) if loadBalancerConditionReady { t.Fatalf("Expected LoadBalancerReady condition to be False, but was True") } - loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) - if loadBalancerConditionReason != clusterv1.DeletedReason { + loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1beta1.LoadBalancerReadyCondition) + if loadBalancerConditionReason != clusterv1beta1.DeletedReason { t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason) } }, @@ -3075,12 +3077,12 @@ func TestDeleteAPIServerELB(t *testing.T) { ) }, verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) { - loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1.LoadBalancerReadyCondition) + loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition) if loadBalancerConditionReady { t.Fatalf("Expected LoadBalancerReady condition to be False, but was True") } - loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1.LoadBalancerReadyCondition) - if loadBalancerConditionReason != clusterv1.DeletedReason { + loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1beta1.LoadBalancerReadyCondition) + if loadBalancerConditionReason != clusterv1beta1.DeletedReason { t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason) } }, diff --git a/pkg/cloud/services/gc/cleanup_test.go b/pkg/cloud/services/gc/cleanup_test.go index 0be1138c27..31e9c8cbc2 100644 --- a/pkg/cloud/services/gc/cleanup_test.go +++ b/pkg/cloud/services/gc/cleanup_test.go @@ -39,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestReconcileDelete(t *testing.T) { @@ -938,11 +938,10 @@ func createEKSCluster() *clusterv1.Cluster { Namespace: "default", }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "AWSManagedControlPlane", - APIVersion: ekscontrolplanev1.GroupVersion.String(), - Name: "cp1", - Namespace: "default", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSManagedControlPlane", + APIGroup: ekscontrolplanev1.GroupVersion.Group, + Name: "cp1", }, }, } @@ -1021,11 +1020,10 @@ func createUnmanagedCluster() *clusterv1.Cluster { Namespace: "default", }, Spec: clusterv1.ClusterSpec{ - InfrastructureRef: &corev1.ObjectReference{ - Kind: "AWSCluster", - APIVersion: infrav1.GroupVersion.String(), - Name: "cluster1", - Namespace: "default", + InfrastructureRef: clusterv1.ContractVersionedObjectReference{ + Kind: "AWSCluster", + APIGroup: infrav1.GroupVersion.Group, + Name: "cluster1", }, }, } diff --git a/pkg/cloud/services/iamauth/reconcile.go b/pkg/cloud/services/iamauth/reconcile.go index 0f47a98634..e01b574a2a 100644 --- a/pkg/cloud/services/iamauth/reconcile.go +++ b/pkg/cloud/services/iamauth/reconcile.go @@ -23,14 +23,13 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/iam" "github.com/pkg/errors" - corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" infrav1 
"sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // ReconcileIAMAuthenticator is used to create the aws-iam-authenticator in a cluster. @@ -140,7 +139,7 @@ func (s *Service) getRolesForMachineDeployments(ctx context.Context, allRoles ma Namespace: s.scope.Namespace(), }, awsMachineTemplate) if err != nil { - return fmt.Errorf("failed to get AWSMachine %s/%s: %w", ref.Namespace, ref.Name, err) + return fmt.Errorf("failed to get AWSMachine %s/%s: %w", s.scope.Namespace(), ref.Name, err) } instanceProfile := awsMachineTemplate.Spec.Template.Spec.IAMInstanceProfile if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" { @@ -179,14 +178,14 @@ func (s *Service) getRolesForMachinePools(ctx context.Context, allRoles map[stri return nil } -func (s *Service) getRolesForAWSMachinePool(ctx context.Context, ref corev1.ObjectReference, allRoles map[string]struct{}) error { +func (s *Service) getRolesForAWSMachinePool(ctx context.Context, ref clusterv1.ContractVersionedObjectReference, allRoles map[string]struct{}) error { awsMachinePool := &expinfrav1.AWSMachinePool{} err := s.client.Get(ctx, client.ObjectKey{ Name: ref.Name, Namespace: s.scope.Namespace(), }, awsMachinePool) if err != nil { - return fmt.Errorf("failed to get AWSMachine %s/%s: %w", ref.Namespace, ref.Name, err) + return fmt.Errorf("failed to get AWSMachine %s/%s: %w", s.scope.Namespace(), ref.Name, err) } instanceProfile := awsMachinePool.Spec.AWSLaunchTemplate.IamInstanceProfile if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" { @@ -195,14 +194,14 @@ func (s *Service) getRolesForAWSMachinePool(ctx context.Context, ref corev1.Obje return nil } -func (s *Service) getRolesForAWSManagedMachinePool(ctx context.Context, ref corev1.ObjectReference, allRoles map[string]struct{}) error { +func (s *Service) getRolesForAWSManagedMachinePool(ctx context.Context, ref clusterv1.ContractVersionedObjectReference, allRoles map[string]struct{}) error { awsManagedMachinePool := &expinfrav1.AWSManagedMachinePool{} err := s.client.Get(ctx, client.ObjectKey{ Name: ref.Name, Namespace: s.scope.Namespace(), }, awsManagedMachinePool) if err != nil { - return fmt.Errorf("failed to get AWSMachine %s/%s: %w", ref.Namespace, ref.Name, err) + return fmt.Errorf("failed to get AWSMachine %s/%s: %w", s.scope.Namespace(), ref.Name, err) } instanceProfile := awsManagedMachinePool.Spec.RoleName if _, ok := allRoles[instanceProfile]; !ok && instanceProfile != "" { diff --git a/pkg/cloud/services/iamauth/reconcile_test.go b/pkg/cloud/services/iamauth/reconcile_test.go index ae8ae86556..79663d2b99 100644 --- a/pkg/cloud/services/iamauth/reconcile_test.go +++ b/pkg/cloud/services/iamauth/reconcile_test.go @@ -23,7 +23,6 @@ import ( "github.com/golang/mock/gomock" . 
"github.com/onsi/gomega" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/utils/ptr" @@ -31,7 +30,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" ) @@ -59,22 +58,20 @@ func TestReconcileIAMAuth(t *testing.T) { eksCluster := createEKSCluster(name, ns) g.Expect(testEnv.Create(ctx, eksCluster)).To(Succeed()) awsMP := createAWSMachinePoolForClusterWithInstanceProfile(name, ns, eksCluster.Name, "nodes.cluster-api-provider-aws.sigs.k8s.io") - infraRef := corev1.ObjectReference{ - Kind: awsMP.TypeMeta.Kind, - Name: awsMP.Name, - Namespace: awsMP.Namespace, - APIVersion: awsMP.TypeMeta.APIVersion, + infraRef := clusterv1.ContractVersionedObjectReference{ + Kind: awsMP.TypeMeta.Kind, + Name: awsMP.Name, + APIGroup: awsMP.TypeMeta.GroupVersionKind().Group, } g.Expect(testEnv.Create(ctx, awsMP)).To(Succeed()) mp := createMachinepoolForCluster(name, ns, eksCluster.Name, infraRef) g.Expect(testEnv.Create(ctx, mp)).To(Succeed()) awsMachineTemplate := createAWSMachineTemplateForClusterWithInstanceProfile(name, ns, eksCluster.Name, "eks-nodes.cluster-api-provider-aws.sigs.k8s.io") - infraRefForMD := corev1.ObjectReference{ - Kind: awsMachineTemplate.TypeMeta.Kind, - Name: awsMachineTemplate.Name, - Namespace: awsMachineTemplate.Namespace, - APIVersion: awsMachineTemplate.TypeMeta.APIVersion, + infraRefForMD := clusterv1.ContractVersionedObjectReference{ + Kind: awsMachineTemplate.TypeMeta.Kind, + Name: awsMachineTemplate.Name, + APIGroup: awsMachineTemplate.TypeMeta.GroupVersionKind().Group, } g.Expect(testEnv.Create(ctx, awsMachineTemplate)).To(Succeed()) md := createMachineDeploymentForCluster(name, ns, eksCluster.Name, infraRefForMD) @@ -145,7 +142,7 @@ func createAWSMachinePoolForClusterWithInstanceProfile(name, namespace, clusterN return awsMP } -func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *clusterv1.MachinePool { +func createMachinepoolForCluster(name, namespace, clusterName string, infrastructureRef clusterv1.ContractVersionedObjectReference) *clusterv1.MachinePool { mp := &clusterv1.MachinePool{ ObjectMeta: metav1.ObjectMeta{ Name: name, @@ -191,7 +188,7 @@ func createAWSMachineTemplateForClusterWithInstanceProfile(name, namespace, clus return mt } -func createMachineDeploymentForCluster(name, namespace, clusterName string, infrastructureRef corev1.ObjectReference) *clusterv1.MachineDeployment { +func createMachineDeploymentForCluster(name, namespace, clusterName string, infrastructureRef clusterv1.ContractVersionedObjectReference) *clusterv1.MachineDeployment { md := &clusterv1.MachineDeployment{ ObjectMeta: metav1.ObjectMeta{ Name: name, diff --git a/pkg/cloud/services/iamauth/suite_test.go b/pkg/cloud/services/iamauth/suite_test.go index eaf1dda70f..48064de119 100644 --- a/pkg/cloud/services/iamauth/suite_test.go +++ b/pkg/cloud/services/iamauth/suite_test.go @@ -29,7 +29,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 
"sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( diff --git a/pkg/cloud/services/instancestate/helpers_test.go b/pkg/cloud/services/instancestate/helpers_test.go index d2d10b05ca..53f9f922f9 100644 --- a/pkg/cloud/services/instancestate/helpers_test.go +++ b/pkg/cloud/services/instancestate/helpers_test.go @@ -23,7 +23,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func setupCluster(clusterName string) (*scope.ClusterScope, error) { diff --git a/pkg/cloud/services/network/carriergateways.go b/pkg/cloud/services/network/carriergateways.go index af89c43bc4..2876344b75 100644 --- a/pkg/cloud/services/network/carriergateways.go +++ b/pkg/cloud/services/network/carriergateways.go @@ -25,7 +25,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" - infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/filter" @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func (s *Service) reconcileCarrierGateway() error { @@ -78,7 +78,7 @@ func (s *Service) reconcileCarrierGateway() error { record.Warnf(s.scope.InfraCluster(), "FailedTagCarrierGateway", "Failed to tag managed Carrier Gateway %q: %v", cagw.CarrierGatewayId, err) return errors.Wrapf(err, "failed to tag carrier gateway %q", *cagw.CarrierGatewayId) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition) return nil } diff --git a/pkg/cloud/services/network/carriergateways_test.go b/pkg/cloud/services/network/carriergateways_test.go index a1bc0d8ac6..722446eff9 100644 --- a/pkg/cloud/services/network/carriergateways_test.go +++ b/pkg/cloud/services/network/carriergateways_test.go @@ -33,7 +33,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestReconcileCarrierGateway(t *testing.T) { diff --git a/pkg/cloud/services/network/egress_only_gateways.go b/pkg/cloud/services/network/egress_only_gateways.go index 1827957ac2..24a411a1e4 100644 --- a/pkg/cloud/services/network/egress_only_gateways.go +++ b/pkg/cloud/services/network/egress_only_gateways.go @@ -25,6 +25,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" @@ -33,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" 
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func (s *Service) reconcileEgressOnlyInternetGateways() error { @@ -79,7 +80,7 @@ func (s *Service) reconcileEgressOnlyInternetGateways() error { record.Warnf(s.scope.InfraCluster(), "FailedTagEgressOnlyInternetGateway", "Failed to tag managed Egress Only Internet Gateway %q: %v", gateway.EgressOnlyInternetGatewayId, err) return errors.Wrapf(err, "failed to tag egress only internet gateway %q", *gateway.EgressOnlyInternetGatewayId) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition) return nil } diff --git a/pkg/cloud/services/network/egress_only_gateways_test.go b/pkg/cloud/services/network/egress_only_gateways_test.go index 56d65d9880..ee3f57a47d 100644 --- a/pkg/cloud/services/network/egress_only_gateways_test.go +++ b/pkg/cloud/services/network/egress_only_gateways_test.go @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestReconcileEgressOnlyInternetGateways(t *testing.T) { diff --git a/pkg/cloud/services/network/eips_test.go b/pkg/cloud/services/network/eips_test.go index 643f9bb177..7992367fd4 100644 --- a/pkg/cloud/services/network/eips_test.go +++ b/pkg/cloud/services/network/eips_test.go @@ -33,7 +33,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestServiceReleaseAddresses(t *testing.T) { diff --git a/pkg/cloud/services/network/gateways.go b/pkg/cloud/services/network/gateways.go index ee9fa65692..4b06b80fa4 100644 --- a/pkg/cloud/services/network/gateways.go +++ b/pkg/cloud/services/network/gateways.go @@ -25,6 +25,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" @@ -33,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func (s *Service) reconcileInternetGateways() error { @@ -74,7 +75,7 @@ func (s *Service) reconcileInternetGateways() error { record.Warnf(s.scope.InfraCluster(), "FailedTagInternetGateway", "Failed to tag managed Internet Gateway %q: %v", gateway.InternetGatewayId, err) return errors.Wrapf(err, "failed to tag internet gateway %q", *gateway.InternetGatewayId) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition) return nil } diff --git a/pkg/cloud/services/network/gateways_test.go 
b/pkg/cloud/services/network/gateways_test.go index 48c0b5b047..d1b0f7dfd0 100644 --- a/pkg/cloud/services/network/gateways_test.go +++ b/pkg/cloud/services/network/gateways_test.go @@ -32,7 +32,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestReconcileInternetGateways(t *testing.T) { diff --git a/pkg/cloud/services/network/natgateways.go b/pkg/cloud/services/network/natgateways.go index 8f8cad527c..109f2f2b23 100644 --- a/pkg/cloud/services/network/natgateways.go +++ b/pkg/cloud/services/network/natgateways.go @@ -28,6 +28,7 @@ import ( "github.com/pkg/errors" kerrors "k8s.io/apimachinery/pkg/util/errors" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" @@ -36,8 +37,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func (s *Service) reconcileNatGateways() error { @@ -56,18 +57,18 @@ func (s *Service) reconcileNatGateways() error { s.scope.Debug("No private subnets available, skipping NAT gateways") conditions.MarkFalse( s.scope.InfraCluster(), - infrav1.NatGatewaysReadyCondition, - infrav1.NatGatewaysReconciliationFailedReason, - clusterv1.ConditionSeverityWarning, + infrav1beta1.NatGatewaysReadyCondition, + infrav1beta1.NatGatewaysReconciliationFailedReason, + clusterv1beta1.ConditionSeverityWarning, "No private subnets available, skipping NAT gateways") return nil } else if len(s.scope.Subnets().FilterPublic().FilterNonCni()) == 0 { s.scope.Debug("No public subnets available. Cannot create NAT gateways for private subnets, this might be a configuration error.") conditions.MarkFalse( s.scope.InfraCluster(), - infrav1.NatGatewaysReadyCondition, - infrav1.NatGatewaysReconciliationFailedReason, - clusterv1.ConditionSeverityWarning, + infrav1beta1.NatGatewaysReadyCondition, + infrav1beta1.NatGatewaysReconciliationFailedReason, + clusterv1beta1.ConditionSeverityWarning, "No public subnets available. 
Cannot create NAT gateways for private subnets, this might be a configuration error.") return nil } @@ -80,8 +81,8 @@ func (s *Service) reconcileNatGateways() error { // Batch the creation of NAT gateways if len(subnetIDs) > 0 { // set NatGatewayCreationStarted if the condition has never been set before - if !conditions.Has(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysCreationStartedReason, clusterv1.ConditionSeverityInfo, "") + if !conditions.Has(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition) { + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, infrav1beta1.NatGatewaysCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return errors.Wrap(err, "failed to patch conditions") } @@ -100,7 +101,7 @@ func (s *Service) reconcileNatGateways() error { if err != nil { return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition) } return nil diff --git a/pkg/cloud/services/network/natgateways_test.go b/pkg/cloud/services/network/natgateways_test.go index 97d7ff8900..7c5eee7e39 100644 --- a/pkg/cloud/services/network/natgateways_test.go +++ b/pkg/cloud/services/network/natgateways_test.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( diff --git a/pkg/cloud/services/network/network.go b/pkg/cloud/services/network/network.go index 1550a1e78f..4bc9c95dc3 100644 --- a/pkg/cloud/services/network/network.go +++ b/pkg/cloud/services/network/network.go @@ -19,11 +19,12 @@ package network import ( "k8s.io/klog/v2" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/conditions" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // ReconcileNetwork reconciles the network of the given cluster. @@ -32,66 +33,66 @@ func (s *Service) ReconcileNetwork() (err error) { // VPC. 
if err := s.reconcileVPC(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, infrav1beta1.VpcReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition) // Secondary CIDRs if err := s.associateSecondaryCidrs(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, infrav1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, infrav1beta1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition) // Subnets. if err := s.reconcileSubnets(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, infrav1.SubnetsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, infrav1beta1.SubnetsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition) // Internet Gateways. if err := s.reconcileInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, infrav1.InternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, infrav1beta1.InternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition) // Carrier Gateway. if err := s.reconcileCarrierGateway(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, infrav1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition, infrav1beta1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition) // Egress Only Internet Gateways. 
if err := s.reconcileEgressOnlyInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, infrav1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, infrav1beta1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition) // NAT Gateways. if err := s.reconcileNatGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, infrav1.NatGatewaysReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, infrav1beta1.NatGatewaysReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition) // Routing tables. if err := s.reconcileRouteTables(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, infrav1.RouteTableReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, infrav1beta1.RouteTableReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition) // VPC Endpoints. if err := s.reconcileVPCEndpoints(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, infrav1.VpcEndpointsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, infrav1beta1.VpcEndpointsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition) s.scope.Debug("Reconcile network completed successfully") return nil @@ -120,40 +121,40 @@ func (s *Service) DeleteNetwork() (err error) { vpc.DeepCopyInto(s.scope.VPC()) // VPC Endpoints. 
- conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteVPCEndpoints(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcEndpointsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // Routing tables. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteRouteTables(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // NAT Gateways. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteNatGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.NatGatewaysReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // EIPs. if err := s.releaseAddresses(); err != nil { @@ -161,68 +162,68 @@ func (s *Service) DeleteNetwork() (err error) { } // Internet Gateways. 
- conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.InternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // Carrier Gateway. if s.scope.VPC().CarrierGatewayID != nil { if err := s.deleteCarrierGateway(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.CarrierGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") } // Egress Only Internet Gateways. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteEgressOnlyInternetGateways(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.EgressOnlyInternetGatewayReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // Subnets. 
- conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteSubnets(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // Secondary CIDR. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.disassociateSecondaryCidrs(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } // VPC. - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.deleteVPC(); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") s.scope.Debug("Delete network completed successfully") return nil diff --git a/pkg/cloud/services/network/routetables.go b/pkg/cloud/services/network/routetables.go index 21dd039ff1..e438ac7338 100644 --- a/pkg/cloud/services/network/routetables.go +++ b/pkg/cloud/services/network/routetables.go @@ -25,6 +25,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" @@ -33,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" 
"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( @@ -122,7 +123,7 @@ func (s *Service) reconcileRouteTables() error { s.scope.Debug("Subnet has been associated with route table", "subnet-id", sn.GetResourceID(), "route-table-id", rt.ID) sn.RouteTableID = aws.String(rt.ID) } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.RouteTablesReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition) return nil } diff --git a/pkg/cloud/services/network/routetables_test.go b/pkg/cloud/services/network/routetables_test.go index 6b81c91585..526ec1d537 100644 --- a/pkg/cloud/services/network/routetables_test.go +++ b/pkg/cloud/services/network/routetables_test.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestReconcileRouteTables(t *testing.T) { diff --git a/pkg/cloud/services/network/secondarycidr_test.go b/pkg/cloud/services/network/secondarycidr_test.go index d2e9b46a35..d6e8368a27 100644 --- a/pkg/cloud/services/network/secondarycidr_test.go +++ b/pkg/cloud/services/network/secondarycidr_test.go @@ -35,13 +35,13 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func setupNewManagedControlPlaneScope(cl client.Client) (*scope.ManagedControlPlaneScope, error) { return scope.NewManagedControlPlaneScope(scope.ManagedControlPlaneScopeParams{ Client: cl, - Cluster: &v1beta1.Cluster{}, + Cluster: &clusterv1.Cluster{}, ControlPlane: &ekscontrolplanev1.AWSManagedControlPlane{ Spec: ekscontrolplanev1.AWSManagedControlPlaneSpec{ SecondaryCidrBlock: ptr.To[string]("secondary-cidr"), @@ -101,7 +101,8 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { {CidrBlock: aws.String("secondary-cidr")}, }, }, - }}, nil) + }, + }, nil) }, }, { @@ -123,7 +124,8 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { { CidrBlockAssociationSet: []types.VpcCidrBlockAssociation{}, }, - }}, nil) + }, + }, nil) m.AssociateVpcCidrBlock(context.TODO(), gomock.AssignableToTypeOf(&ec2.AssociateVpcCidrBlockInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure")) }, wantErr: true, @@ -137,7 +139,8 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { { CidrBlockAssociationSet: []types.VpcCidrBlockAssociation{}, }, - }}, nil) + }, + }, nil) m.AssociateVpcCidrBlock(context.TODO(), gomock.AssignableToTypeOf(&ec2.AssociateVpcCidrBlockInput{})).Return(&ec2.AssociateVpcCidrBlockOutput{ CidrBlockAssociation: &types.VpcCidrBlockAssociation{ AssociationId: ptr.To[string]("association-id-success"), @@ -179,7 +182,8 @@ func TestServiceAssociateSecondaryCidr(t *testing.T) { }, }, }, - }}, nil) + }, + }, nil) // ...the other two should be created m.AssociateVpcCidrBlock(context.TODO(), gomock.Eq(&ec2.AssociateVpcCidrBlockInput{ @@ -286,7 +290,8 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { {CidrBlock: aws.String("secondary-cidr")}, }, }, - }}, nil) + }, + }, nil) m.DisassociateVpcCidrBlock(context.TODO(), 
gomock.AssignableToTypeOf(&ec2.DisassociateVpcCidrBlockInput{})).Return(nil, nil) }, }, @@ -301,7 +306,8 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { {CidrBlock: aws.String("secondary-cidr")}, }, }, - }}, nil) + }, + }, nil) m.DisassociateVpcCidrBlock(context.TODO(), gomock.AssignableToTypeOf(&ec2.DisassociateVpcCidrBlockInput{})).Return(nil, awserrors.NewFailedDependency("dependency-failure")) }, wantErr: true, @@ -315,7 +321,8 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { { CidrBlockAssociationSet: []types.VpcCidrBlockAssociation{}, }, - }}, nil) + }, + }, nil) // No calls expected m.DisassociateVpcCidrBlock(context.TODO(), gomock.Any()).Times(0) @@ -366,7 +373,8 @@ func TestServiceDiassociateSecondaryCidr(t *testing.T) { }, }, }, - }}, nil) + }, + }, nil) m.DisassociateVpcCidrBlock(context.TODO(), gomock.Eq(&ec2.DisassociateVpcCidrBlockInput{ AssociationId: ptr.To[string]("association-id-existing-1"), // 10.0.1.0/24 (see above) diff --git a/pkg/cloud/services/network/subnets.go b/pkg/cloud/services/network/subnets.go index f339a9a8c0..eefff53c3f 100644 --- a/pkg/cloud/services/network/subnets.go +++ b/pkg/cloud/services/network/subnets.go @@ -30,6 +30,7 @@ import ( "github.com/aws/aws-sdk-go-v2/service/ec2/types" "github.com/pkg/errors" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" @@ -39,7 +40,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cidr" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( @@ -205,7 +206,7 @@ func (s *Service) reconcileSubnets() error { } s.scope.Debug("Reconciled subnets", "subnets", subnets) - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.SubnetsReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition) return nil } diff --git a/pkg/cloud/services/network/subnets_test.go b/pkg/cloud/services/network/subnets_test.go index 48238d2f77..af10b34712 100644 --- a/pkg/cloud/services/network/subnets_test.go +++ b/pkg/cloud/services/network/subnets_test.go @@ -40,7 +40,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( diff --git a/pkg/cloud/services/network/vpc.go b/pkg/cloud/services/network/vpc.go index 853269eda7..e8fe82ec3d 100644 --- a/pkg/cloud/services/network/vpc.go +++ b/pkg/cloud/services/network/vpc.go @@ -27,6 +27,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" @@ -36,7 +37,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 
"sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) @@ -137,8 +138,8 @@ func (s *Service) reconcileVPC() error { s.scope.VPC().Tags = vpc.Tags s.scope.VPC().ID = vpc.ID - if !conditions.Has(s.scope.InfraCluster(), infrav1.VpcReadyCondition) { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.VpcReadyCondition, infrav1.VpcCreationStartedReason, clusterv1.ConditionSeverityInfo, "") + if !conditions.Has(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition) { + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, infrav1beta1.VpcCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return errors.Wrap(err, "failed to patch conditions") } diff --git a/pkg/cloud/services/network/vpc_test.go b/pkg/cloud/services/network/vpc_test.go index 0c5f086962..9b9f3a1e4d 100644 --- a/pkg/cloud/services/network/vpc_test.go +++ b/pkg/cloud/services/network/vpc_test.go @@ -34,7 +34,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func describeVpcAttributeTrue(_ context.Context, input *ec2.DescribeVpcAttributeInput, _ ...ec2.Options) (*ec2.DescribeVpcAttributeOutput, error) { diff --git a/pkg/cloud/services/s3/s3_test.go b/pkg/cloud/services/s3/s3_test.go index 22e4475877..0f4dbb2e78 100644 --- a/pkg/cloud/services/s3/s3_test.go +++ b/pkg/cloud/services/s3/s3_test.go @@ -43,7 +43,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/s3/mock_s3iface" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/sts/mock_stsiface" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) const ( diff --git a/pkg/cloud/services/secretsmanager/secret_test.go b/pkg/cloud/services/secretsmanager/secret_test.go index c7d898a034..8c07fcdfe0 100644 --- a/pkg/cloud/services/secretsmanager/secret_test.go +++ b/pkg/cloud/services/secretsmanager/secret_test.go @@ -35,7 +35,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) func TestServiceCreate(t *testing.T) { diff --git a/pkg/cloud/services/securitygroup/securitygroups.go b/pkg/cloud/services/securitygroup/securitygroups.go index bd5ec437dd..1323faeb6f 100644 --- a/pkg/cloud/services/securitygroup/securitygroups.go +++ b/pkg/cloud/services/securitygroup/securitygroups.go @@ -28,6 +28,7 @@ import ( kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" + infrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta1" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/converters" @@ -38,7 +39,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 
"sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) @@ -197,7 +198,7 @@ func (s *Service) ReconcileSecurityGroups() error { s.scope.Debug("Authorized ingress rules in security group", "authorized-ingress-rules", toAuthorize, "security-group-id", sg.ID) } } - conditions.MarkTrue(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition) + conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition) return nil } @@ -308,7 +309,7 @@ func (s *Service) ec2SecurityGroupToSecurityGroup(ec2SecurityGroup types.Securit func (s *Service) DeleteSecurityGroups() error { if s.scope.VPC().ID == "" { s.scope.Debug("Skipping security group deletion, vpc-id is nil", "vpc-id", s.scope.VPC().ID) - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } @@ -322,7 +323,7 @@ func (s *Service) DeleteSecurityGroups() error { return nil } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletingReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } @@ -331,7 +332,7 @@ func (s *Service) DeleteSecurityGroups() error { sg := clusterGroups[i] current := sg.IngressRules if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil { //nolint:gocritic - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -343,10 +344,10 @@ func (s *Service) DeleteSecurityGroups() error { } if err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1.ConditionSeverityWarning, "%s", err.Error()) + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1.ClusterSecurityGroupsReadyCondition, clusterv1.DeletedReason, clusterv1.ConditionSeverityInfo, "") + conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } diff --git a/pkg/cloud/services/securitygroup/securitygroups_test.go b/pkg/cloud/services/securitygroup/securitygroups_test.go index ee20404063..4a2a7b33b1 100644 --- a/pkg/cloud/services/securitygroup/securitygroups_test.go +++ b/pkg/cloud/services/securitygroup/securitygroups_test.go @@ -41,7 +41,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services" "sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 
"sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( diff --git a/pkg/cloud/services/ssm/secret_test.go b/pkg/cloud/services/ssm/secret_test.go index abb06b6d49..0f15dc9f48 100644 --- a/pkg/cloud/services/ssm/secret_test.go +++ b/pkg/cloud/services/ssm/secret_test.go @@ -38,7 +38,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/ssm/mock_ssmiface" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) type mockAPIError struct { diff --git a/pkg/utils/utils.go b/pkg/utils/utils.go index eba75fe50d..886470530c 100644 --- a/pkg/utils/utils.go +++ b/pkg/utils/utils.go @@ -9,7 +9,7 @@ import ( "k8s.io/utils/ptr" crclient "sigs.k8s.io/controller-runtime/pkg/client" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // GetMachinePools belong to a cluster. diff --git a/test/e2e/shared/common.go b/test/e2e/shared/common.go index 56e65a7763..38553e3f5c 100644 --- a/test/e2e/shared/common.go +++ b/test/e2e/shared/common.go @@ -37,7 +37,7 @@ import ( crclient "sigs.k8s.io/controller-runtime/pkg/client" infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" diff --git a/test/e2e/suites/managed/control_plane_helpers.go b/test/e2e/suites/managed/control_plane_helpers.go index b21bcf4c10..9b376cf142 100644 --- a/test/e2e/suites/managed/control_plane_helpers.go +++ b/test/e2e/suites/managed/control_plane_helpers.go @@ -33,7 +33,7 @@ import ( crclient "sigs.k8s.io/controller-runtime/pkg/client" ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" clusterctl "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) diff --git a/test/e2e/suites/managed/machine_deployment.go b/test/e2e/suites/managed/machine_deployment.go index 5fef37e5d2..9a584f6a77 100644 --- a/test/e2e/suites/managed/machine_deployment.go +++ b/test/e2e/suites/managed/machine_deployment.go @@ -30,7 +30,7 @@ import ( "k8s.io/utils/ptr" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" ) diff --git a/test/e2e/suites/managed/machine_deployment_helpers.go b/test/e2e/suites/managed/machine_deployment_helpers.go index 5513809e7b..a3c6c49ee4 100644 --- a/test/e2e/suites/managed/machine_deployment_helpers.go +++ b/test/e2e/suites/managed/machine_deployment_helpers.go @@ -28,7 +28,7 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/client" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" ) type deleteMachineDeploymentInput struct { diff --git a/test/e2e/suites/managed/managed_suite_test.go 
b/test/e2e/suites/managed/managed_suite_test.go index f8f4ec596d..2371dfda53 100644 --- a/test/e2e/suites/managed/managed_suite_test.go +++ b/test/e2e/suites/managed/managed_suite_test.go @@ -32,7 +32,7 @@ import ( ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2" expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) var ( diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go index c143533870..5f88d4219d 100644 --- a/test/e2e/suites/unmanaged/helpers_test.go +++ b/test/e2e/suites/unmanaged/helpers_test.go @@ -51,12 +51,12 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" bootstrapv1 "sigs.k8s.io/cluster-api/api/bootstrap/kubeadm/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/api/controlplane/kubeadm/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // GetClusterByName returns a Cluster object given his name. @@ -415,7 +415,7 @@ func LatestCIReleaseForVersion(searchVersion string) (string, error) { type conditionAssertion struct { conditionType clusterv1.ConditionType status corev1.ConditionStatus - severity clusterv1.ConditionSeverity + severity clusterv1beta1.ConditionSeverity reason string } diff --git a/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go b/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go index dc1128e47f..8e1b04204e 100644 --- a/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_classic_elb_upgrade_test.go @@ -40,7 +40,7 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go index 17a9c75652..614c6ddbff 100644 --- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go +++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go @@ -41,10 +41,10 @@ import ( infrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/api/v1beta2" "sigs.k8s.io/cluster-api-provider-aws/v2/exp/instancestate" "sigs.k8s.io/cluster-api-provider-aws/v2/test/e2e/shared" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/test/framework" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/conditions" + "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const TestSvc = "test-svc-" @@ -967,7 +967,7 @@ var _ = ginkgo.Context("[unmanaged] [functional]", 
func() { } Expect(err).To(BeNil()) return conditions.IsFalse(awsCluster, infrav1.VpcEndpointsReadyCondition) && - conditions.GetReason(awsCluster, infrav1.VpcEndpointsReadyCondition) == clusterv1.DeletedReason + conditions.GetReason(awsCluster, infrav1.VpcEndpointsReadyCondition) == clusterv1beta1.DeletedReason }, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...).Should(BeTrue(), "Eventually failed waiting for AWSCluster to show VPC endpoint as deleted in conditions") }) diff --git a/test/helpers/envtest.go b/test/helpers/envtest.go index 0663a9d768..56030a879d 100644 --- a/test/helpers/envtest.go +++ b/test/helpers/envtest.go @@ -51,7 +51,7 @@ import ( metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" "sigs.k8s.io/controller-runtime/pkg/webhook" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log" utilyaml "sigs.k8s.io/cluster-api/util/yaml" ) diff --git a/util/conditions/helper.go b/util/conditions/helper.go index 9dfd97b919..b6faab2563 100644 --- a/util/conditions/helper.go +++ b/util/conditions/helper.go @@ -18,16 +18,17 @@ limitations under the License. package conditions import ( - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + "k8s.io/apimachinery/pkg/api/meta" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" ) // ErrorConditionAfterInit returns severity error, if the control plane is initialized; otherwise, returns severity warning. // Failures after control plane is initialized is likely to be non-transient, // hence conditions severities should be set to Error. -func ErrorConditionAfterInit(getter conditions.Getter) clusterv1.ConditionSeverity { - if conditions.IsTrue(getter, clusterv1.ControlPlaneInitializedCondition) { - return clusterv1.ConditionSeverityError +func ErrorConditionAfterInit(cluster *clusterv1.Cluster) clusterv1beta1.ConditionSeverity { + if meta.IsStatusConditionTrue(cluster.GetConditions(), string(clusterv1beta1.ControlPlaneInitializedCondition)) { + return clusterv1beta1.ConditionSeverityError } - return clusterv1.ConditionSeverityWarning + return clusterv1beta1.ConditionSeverityWarning } diff --git a/util/paused/paused.go b/util/paused/paused.go index 6cb56a50da..21a8cb8577 100644 --- a/util/paused/paused.go +++ b/util/paused/paused.go @@ -32,12 +32,24 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" + clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util/annotations" "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) +const ( + // PausedCondition is the condition type for paused status + PausedCondition clusterv1beta1.ConditionType = "Paused" + + // PausedReason is the reason when an object is paused + PausedReason = "Paused" + + // NotPausedReason is the reason when an object is not paused + NotPausedReason = "NotPaused" +) + // ConditionSetter combines the client.Object and Setter interface. type ConditionSetter interface { conditions.Setter @@ -46,8 +58,8 @@ type ConditionSetter interface { // EnsurePausedCondition sets the paused condition on the object and returns if it should be considered as paused. 
 func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, obj ConditionSetter) (isPaused bool, conditionChanged bool, err error) {
-	oldCondition := conditions.Get(obj, clusterv1.PausedV1Beta2Condition)
-	newCondition := pausedCondition(c.Scheme(), cluster, obj, clusterv1.PausedV1Beta2Condition)
+	oldCondition := conditions.Get(obj, PausedCondition)
+	newCondition := pausedCondition(c.Scheme(), cluster, obj, string(PausedCondition))
 	isPaused = newCondition.Status == corev1.ConditionTrue
@@ -74,8 +86,8 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste
 	conditions.Set(obj, &newCondition)
-	if err := patchHelper.Patch(ctx, obj, patch.WithOwnedV1Beta2Conditions{Conditions: []string{
-		clusterv1.PausedV1Beta2Condition,
+	if err := patchHelper.Patch(ctx, obj, patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
+		PausedCondition,
 	}}); err != nil {
 		return isPaused, false, err
 	}
@@ -84,10 +96,10 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste
 }
 // pausedCondition computes the paused condition for the object and returns whether it should be considered as paused.
-func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj ConditionSetter, targetConditionType string) clusterv1.Condition {
-	if (cluster != nil && cluster.Spec.Paused) || annotations.HasPaused(obj) {
+func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj ConditionSetter, targetConditionType string) clusterv1beta1.Condition {
+	if (cluster != nil && cluster.Spec.Paused != nil && *cluster.Spec.Paused) || annotations.HasPaused(obj) {
 		var messages []string
-		if cluster != nil && cluster.Spec.Paused {
+		if cluster != nil && cluster.Spec.Paused != nil && *cluster.Spec.Paused {
 			messages = append(messages, "Cluster spec.paused is set to true")
 		}
 		if annotations.HasPaused(obj) {
@@ -98,17 +110,17 @@ func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj Con
 			messages = append(messages, fmt.Sprintf("%s has the cluster.x-k8s.io/paused annotation", kind))
 		}
-		return clusterv1.Condition{
-			Type:    clusterv1.ConditionType(targetConditionType),
+		return clusterv1beta1.Condition{
+			Type:    clusterv1beta1.ConditionType(targetConditionType),
 			Status:  corev1.ConditionTrue,
-			Reason:  clusterv1.PausedV1Beta2Reason,
+			Reason:  PausedReason,
 			Message: strings.Join(messages, ", "),
 		}
 	}
-	return clusterv1.Condition{
-		Type:   clusterv1.ConditionType(targetConditionType),
+	return clusterv1beta1.Condition{
+		Type:   clusterv1beta1.ConditionType(targetConditionType),
 		Status: corev1.ConditionFalse,
-		Reason: clusterv1.NotPausedV1Beta2Reason,
+		Reason: NotPausedReason,
 	}
 }
diff --git a/util/paused/paused_test.go b/util/paused/paused_test.go
index 02f706a438..72e9940dca 100644
--- a/util/paused/paused_test.go
+++ b/util/paused/paused_test.go
@@ -24,10 +24,11 @@ import (
 	. "github.com/onsi/gomega"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/utils/ptr"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/test/builder"
 )
@@ -48,7 +49,7 @@ func TestEnsurePausedCondition(t *testing.T) {
 	// Cluster Case 2: paused
 	pausedCluster := normalCluster.DeepCopy()
-	pausedCluster.Spec.Paused = true
+	pausedCluster.Spec.Paused = ptr.To(true)
 	// Object case 1: unpaused
 	obj := &builder.Phase2Obj{ObjectMeta: metav1.ObjectMeta{

From 38b8a7fa6b3ade76507cc7b93e09596e13cdc44e Mon Sep 17 00:00:00 2001
From: Borja Clemente
Date: Thu, 23 Oct 2025 17:34:03 +0200
Subject: [PATCH 7/9] Update linting pkg alias and fix broken import blocks

Signed-off-by: Borja Clemente
---
 .golangci.yml                                 | 36 ++++++++++---------
 .../suites/managed/control_plane_helpers.go   |  3 +-
 .../managed/machine_deployment_helpers.go     |  3 +-
 3 files changed, 24 insertions(+), 18 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index e48f55626d..ba98fa5247 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -139,8 +139,6 @@ linters:
         alias: runtimeserializer
       - pkg: k8s.io/apimachinery/pkg/runtime/serializer/yaml
         alias: yamlserializer
-      - pkg: sigs.k8s.io/cluster-api/api/v1beta1
-        alias: clusterv1
       - pkg: sigs.k8s.io/cluster-api/util/defaulting
         alias: utildefaulting
       - pkg: sigs.k8s.io/controller-runtime
@@ -169,8 +167,14 @@ linters:
         alias: crclient
       - pkg: k8s.io/apimachinery/pkg/types
         alias: apimachinerytypes
-      - pkg: "sigs.k8s.io/cluster-api/api/core/v1beta1"
-        alias: clusterv1
+      - pkg: "sigs.k8s.io/cluster-api/api/core/v1beta2"
+        alias: clusterv1
+      - pkg: "sigs.k8s.io/cluster-api/api/core/v1beta1"
+        alias: clusterv1beta1
+      - pkg: "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
+        alias: v1beta1patch
+      - pkg: "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+        alias: v1beta1conditions
       no-unaliased: false
     nolintlint:
       require-specific: true
@@ -212,16 +216,16 @@ linters:
 #      - errcheck
 #        text: Error return value of `outputPrinter.Print` is not checked
     - linters:
         - gosec
-      text: 'G103: Use of unsafe calls should be audited'
+      text: "G103: Use of unsafe calls should be audited"
       path: .*(api|types|test)\/.*\/.*conversion.*\.go$
     - linters:
         - staticcheck
-      text: 'QF1008: could remove embedded field .*'
+      text: "QF1008: could remove embedded field .*"
       # TODO: change to use time.Time.Equal
     - linters:
         - staticcheck
-      text: 'QF1009: probably want to use time.Time.Equal instead'
+      text: "QF1009: probably want to use time.Time.Equal instead"
     - linters:
         - revive
       # Ignoring stylistic checks for generated code
      path: zz_generated.*\.go$
    - linters:
        - revive
      # Ignoring stylistic checks for generated code
      path: .*(api|types)\/.*\/.*conversion.*\.go$
      # By convention, receiver names in a method should reflect their identity.
-      text: 'receiver-naming: receiver name (.+) should be consistent with previous receiver name (.+)'
+      text: "receiver-naming: receiver name (.+) should be consistent with previous receiver name (.+)"
     - linters:
         - revive
       # Ignoring stylistic checks for generated code
       path: .*(api|types|test)\/.*\/.*conversion.*\.go$
       # Checking if an error is nil to just after return the error or nil is redundant
-      text: 'if-return: redundant if ...; err != nil check, just return error instead'
+      text: "if-return: redundant if ...; err != nil check, just return error instead"
     - linters:
         - revive
       text: 'exported: exported method .*\.(Reconcile|SetupWithManager|SetupWebhookWithManager) should have comment or be unexported'
@@ -268,7 +272,7 @@ linters:
       text: Error return value of (.+) is not checked
     - linters:
         - gosec
-      text: 'G108: Profiling endpoint is automatically exposed on /debug/pprof'
+      text: "G108: Profiling endpoint is automatically exposed on /debug/pprof"
     - linters:
         - godot
       path: (.*)/(v1beta1|v1beta2)/(.*)types.go
@@ -282,17 +286,17 @@ linters:
     - linters:
         - revive
       path: .*/defaults.go
-      text: 'var-naming: don''t use underscores in Go names; func (.+) should be (.+)'
+      text: "var-naming: don't use underscores in Go names; func (.+) should be (.+)"
     - linters:
         - revive
       path: .*/.*(mock|gc_).*/.+\.go
-      text: 'var-naming: don''t use an underscore in package name'
+      text: "var-naming: don't use an underscore in package name"
     - linters:
         - revive
       # Ignoring stylistic checks for generated code
       path: .*(api|types|test)\/.*\/.*conversion.*\.go$
       # This rule warns when initialism, variable or package naming conventions are not followed.
-      text: 'var-naming: don''t use underscores in Go names'
+      text: "var-naming: don't use underscores in Go names"
     - linters:
         - unparam
       text: always receives
@@
       text: cyclomatic complexity
     - linters:
         - gocritic
-      text: 'appendAssign: append result not assigned to the same slice'
+      text: "appendAssign: append result not assigned to the same slice"
     - path: (.+)\.go$
       text: (Expect directory permissions to be 0750 or less|Expect file permissions to be 0600 or less)
     - path: (.+)\.go$
-      text: 'exported: (func|type) name will be used as (.+) by other packages, and that stutters; consider calling this (.+)'
+      text: "exported: (func|type) name will be used as (.+) by other packages, and that stutters; consider calling this (.+)"
     - path: (.+)\.go$
       text: (G104|G107|G404|G505|ST1000)
     - path: (.+)\.go$
-      text: 'G108: Profiling endpoint is automatically exposed on /debug/pprof'
+      text: "G108: Profiling endpoint is automatically exposed on /debug/pprof"
     - path: (.+)\.go$
       text: net/http.Get must not be called
     - linters:
diff --git a/test/e2e/suites/managed/control_plane_helpers.go b/test/e2e/suites/managed/control_plane_helpers.go
index 9b376cf142..f9ecf73f62 100644
--- a/test/e2e/suites/managed/control_plane_helpers.go
+++ b/test/e2e/suites/managed/control_plane_helpers.go
@@ -33,7 +33,8 @@ import (
 	crclient "sigs.k8s.io/controller-runtime/pkg/client"
 	ekscontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/eks/api/v1beta2"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	"sigs.k8s.io/cluster-api/test/framework"
 	clusterctl "sigs.k8s.io/cluster-api/test/framework/clusterctl"
 )
diff --git a/test/e2e/suites/managed/machine_deployment_helpers.go b/test/e2e/suites/managed/machine_deployment_helpers.go
index a3c6c49ee4..be31599e37 100644
--- a/test/e2e/suites/managed/machine_deployment_helpers.go
+++ b/test/e2e/suites/managed/machine_deployment_helpers.go
@@ -28,7 +28,8 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"sigs.k8s.io/controller-runtime/pkg/client"
-	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/test/framework"
+	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
+	"sigs.k8s.io/cluster-api/test/framework"
 )
 type deleteMachineDeploymentInput struct {

From 0b8af5f130be8569f4e086c24311a41be77d5bd3 Mon Sep 17 00:00:00 2001
From: Borja Clemente
Date: Fri, 24 Oct 2025 12:29:44 +0200
Subject: [PATCH 8/9] Remove unnecessary Paused constants

Signed-off-by: Borja Clemente
---
 util/paused/paused.go | 27 ++++++++-------------------
 1 file changed, 8 insertions(+), 19 deletions(-)

diff --git a/util/paused/paused.go b/util/paused/paused.go
index 21a8cb8577..8390f2c005 100644
--- a/util/paused/paused.go
+++ b/util/paused/paused.go
@@ -36,18 +36,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
-	patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
-)
-
-const (
-	// PausedCondition is the condition type for paused status
-	PausedCondition clusterv1beta1.ConditionType = "Paused"
-
-	// PausedReason is the reason when an object is paused
-	PausedReason = "Paused"
-
-	// NotPausedReason is the reason when an object is not paused
-	NotPausedReason = "NotPaused"
+	v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 )
 // ConditionSetter combines the client.Object and Setter interface.
@@ -58,8 +47,8 @@ type ConditionSetter interface {
 // EnsurePausedCondition sets the paused condition on the object and returns if it should be considered as paused.
 func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, obj ConditionSetter) (isPaused bool, conditionChanged bool, err error) {
-	oldCondition := conditions.Get(obj, PausedCondition)
-	newCondition := pausedCondition(c.Scheme(), cluster, obj, string(PausedCondition))
+	oldCondition := conditions.Get(obj, clusterv1beta1.PausedV1Beta2Condition)
+	newCondition := pausedCondition(c.Scheme(), cluster, obj, string(clusterv1beta1.PausedV1Beta2Condition))
 	isPaused = newCondition.Status == corev1.ConditionTrue
@@ -73,7 +62,7 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste
 		return isPaused, false, nil
 	}
-	patchHelper, err := patch.NewHelper(obj, c)
+	patchHelper, err := v1beta1patch.NewHelper(obj, c)
 	if err != nil {
 		return isPaused, false, err
 	}
@@ -86,8 +75,8 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste
 	conditions.Set(obj, &newCondition)
-	if err := patchHelper.Patch(ctx, obj, patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
-		PausedCondition,
+	if err := patchHelper.Patch(ctx, obj, v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
+		clusterv1beta1.PausedV1Beta2Condition,
 	}}); err != nil {
 		return isPaused, false, err
 	}
@@ -113,7 +102,7 @@ func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj Con
 		return clusterv1beta1.Condition{
 			Type:    clusterv1beta1.ConditionType(targetConditionType),
 			Status:  corev1.ConditionTrue,
-			Reason:  PausedReason,
+			Reason:  clusterv1beta1.PausedV1Beta2Reason,
 			Message: strings.Join(messages, ", "),
 		}
 	}
@@ -121,6 +110,6 @@ func pausedCondition(scheme *runtime.Scheme, cluster *clusterv1.Cluster, obj Con
 	return clusterv1beta1.Condition{
 		Type:   clusterv1beta1.ConditionType(targetConditionType),
 		Status: corev1.ConditionFalse,
-		Reason: NotPausedReason,
+		Reason: clusterv1beta1.NotPausedV1Beta2Reason,
 	}
 }

From d5c7e73f697b2ee8b1c8f54704eaa0aed858f765 Mon Sep 17 00:00:00 2001
From: Borja Clemente
Date: Fri, 24 Oct 2025 15:24:16 +0200
Subject: [PATCH 9/9] Fix import aliases

Signed-off-by: Borja Clemente
---
 .../eks/controllers/eksconfig_controller.go   | 20 ++---
 controllers/awscluster_controller.go          | 24 ++---
 controllers/awsmachine_controller.go          | 62 ++++++-------
 controllers/awsmachine_controller_test.go     |  4 +-
 controllers/helpers_test.go                   |  4 +-
 .../awsmanagedcontrolplane_controller.go      | 14 +--
 .../rosacontrolplane_controller.go            | 40 ++++-----
 .../rosacontrolplane_controller_test.go       |  4 +-
 exp/controllers/awsfargatepool_controller.go  |  6 +-
 exp/controllers/awsmachinepool_controller.go  | 26 +++---
 .../awsmachinepool_controller_test.go         |  4 +-
 .../awsmanagedmachinepool_controller.go       | 10 +--
 exp/controllers/rosamachinepool_controller.go | 16 ++--
 exp/controllers/rosanetwork_controller.go     | 20 ++---
 .../rosanetwork_controller_test.go            |  4 +-
 exp/controllers/rosaroleconfig_controller.go  | 22 ++---
 .../rosaroleconfig_controller_test.go         |  4 +-
 pkg/cloud/interfaces.go                       |  4 +-
 pkg/cloud/scope/cluster.go                    | 18 ++--
 pkg/cloud/scope/fargate.go                    | 12 +--
 pkg/cloud/scope/launchtemplate.go             |  4 +-
 pkg/cloud/scope/machine.go                    | 18 ++--
 pkg/cloud/scope/machinepool.go                | 16 ++--
 pkg/cloud/scope/managedcontrolplane.go        |  8 +-
 pkg/cloud/scope/managednodegroup.go           | 20 ++---
 pkg/cloud/scope/rosacontrolplane.go           |  8 +-
 pkg/cloud/scope/rosamachinepool.go            | 18 ++--
 pkg/cloud/scope/rosanetwork.go                |  8 +-
 pkg/cloud/scope/rosaroleconfig.go             |  7 +-
 pkg/cloud/scope/session.go                    | 30 ++++---
 pkg/cloud/services/ec2/bastion.go             | 14 +--
 pkg/cloud/services/ec2/launchtemplate.go      | 26 +++---
 pkg/cloud/services/eks/cluster.go             | 22 ++---
 pkg/cloud/services/eks/eks.go                 | 26 +++---
 pkg/cloud/services/eks/fargate.go             | 42 ++++-----
 pkg/cloud/services/elb/loadbalancer.go        | 19 ++--
 pkg/cloud/services/elb/loadbalancer_test.go   | 14 +--
 pkg/cloud/services/network/carriergateways.go |  4 +-
 .../services/network/egress_only_gateways.go  |  4 +-
 pkg/cloud/services/network/gateways.go        |  4 +-
 pkg/cloud/services/network/natgateways.go     | 12 +--
 pkg/cloud/services/network/network.go         | 88 +++++++++----------
 pkg/cloud/services/network/routetables.go     |  7 +-
 pkg/cloud/services/network/subnets.go         |  4 +-
 pkg/cloud/services/network/vpc.go             |  6 +-
 .../services/securitygroup/securitygroups.go  | 14 +--
 test/e2e/suites/unmanaged/helpers_test.go     |  4 +-
 .../unmanaged/unmanaged_functional_test.go    |  6 +-
 util/paused/paused.go                         | 10 +--
 49 files changed, 391 insertions(+), 390 deletions(-)

diff --git a/bootstrap/eks/controllers/eksconfig_controller.go b/bootstrap/eks/controllers/eksconfig_controller.go
index b9a4a800e1..af9d0ee8d8 100644
--- a/bootstrap/eks/controllers/eksconfig_controller.go
+++ b/bootstrap/eks/controllers/eksconfig_controller.go
@@ -47,7 +47,7 @@ import (
 	bsutil "sigs.k8s.io/cluster-api/bootstrap/util"
 	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -125,11 +125,11 @@ func (r *EKSConfigReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	// set up defer block for updating config
 	defer func() {
-		conditions.SetSummary(config,
-			conditions.WithConditions(
+		v1beta1conditions.SetSummary(config,
+			v1beta1conditions.WithConditions(
 				eksbootstrapv1.DataSecretAvailableCondition,
 			),
-			conditions.WithStepCounter(),
+			v1beta1conditions.WithStepCounter(),
 		)
 		patchOpts := []patch.Option{}
@@ -209,7 +209,7 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1
 	if !meta.IsStatusConditionTrue(cluster.GetConditions(), string(clusterv1beta1.InfrastructureReadyCondition)) {
 		log.Info("Cluster infrastructure is not ready")
-		conditions.MarkFalse(config,
+		v1beta1conditions.MarkFalse(config,
 			eksbootstrapv1.DataSecretAvailableCondition,
 			eksbootstrapv1.WaitingForClusterInfrastructureReason,
 			clusterv1beta1.ConditionSeverityInfo, "")
@@ -218,7 +218,7 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1
 	if !meta.IsStatusConditionTrue(cluster.GetConditions(), string(clusterv1beta1.ControlPlaneInitializedCondition)) {
 		log.Info("Control Plane has not yet been initialized")
-		conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.WaitingForControlPlaneInitializationReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return nil
 	}
@@ -231,7 +231,7 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1
 	files, err := r.resolveFiles(ctx, config)
 	if err != nil {
 		log.Info("Failed to resolve files for user data")
-		conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
@@ -276,14 +276,14 @@ func (r *EKSConfigReconciler) joinWorker(ctx context.Context, cluster *clusterv1
 	userDataScript, err := userdata.NewNode(nodeInput)
 	if err != nil {
 		log.Error(err, "Failed to create a worker join configuration")
-		conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "")
+		v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "")
 		return err
 	}
 	// store userdata as secret
 	if err := r.storeBootstrapData(ctx, cluster, config, userDataScript); err != nil {
 		log.Error(err, "Failed to store bootstrap data")
-		conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "")
+		v1beta1conditions.MarkFalse(config, eksbootstrapv1.DataSecretAvailableCondition, eksbootstrapv1.DataSecretGenerationFailedReason, clusterv1beta1.ConditionSeverityWarning, "")
 		return err
 	}
@@ -358,7 +358,7 @@ func (r *EKSConfigReconciler) storeBootstrapData(ctx context.Context, cluster *c
 	config.Status.DataSecretName = ptr.To[string](secret.Name)
 	config.Status.Ready = true
-	conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition)
+	v1beta1conditions.MarkTrue(config, eksbootstrapv1.DataSecretAvailableCondition)
 	return nil
 }
diff --git a/controllers/awscluster_controller.go b/controllers/awscluster_controller.go
index cf1bdec3ee..f8c53c5aba 100644
--- a/controllers/awscluster_controller.go
+++ b/controllers/awscluster_controller.go
@@ -55,7 +55,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
 	capiannotations "sigs.k8s.io/cluster-api/util/annotations"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -294,17 +294,17 @@ func (r *AWSClusterReconciler) reconcileLoadBalancer(ctx context.Context, cluste
 	if err := elbService.ReconcileLoadbalancers(ctx); err != nil {
 		clusterScope.Error(err, "failed to reconcile load balancer")
-		conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.LoadBalancerFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error())
 		return nil, err
 	}
 	if awsCluster.Status.Network.APIServerELB.DNSName == "" {
-		conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForDNSNameReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForDNSNameReason, clusterv1beta1.ConditionSeverityInfo, "")
 		clusterScope.Info("Waiting on API server ELB DNS name")
 		return &retryAfterDuration, nil
 	}
-	conditions.MarkTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
+	v1beta1conditions.MarkTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
 	awsCluster.Spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{
 		Host: awsCluster.Status.Network.APIServerELB.DNSName,
@@ -339,12 +339,12 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope
 	if err := sgService.ReconcileSecurityGroups(); err != nil {
 		clusterScope.Error(err, "failed to reconcile security groups")
-		conditions.MarkFalse(awsCluster, infrav1beta1.ClusterSecurityGroupsReadyCondition, infrav1beta1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(awsCluster, infrav1beta1.ClusterSecurityGroupsReadyCondition, infrav1beta1.ClusterSecurityGroupReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error())
 		return reconcile.Result{}, err
 	}
 	if err := ec2Service.ReconcileBastion(); err != nil {
-		conditions.MarkFalse(awsCluster, infrav1beta1.BastionHostReadyCondition, infrav1beta1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(awsCluster, infrav1beta1.BastionHostReadyCondition, infrav1beta1.BastionHostFailedReason, infrautilconditions.ErrorConditionAfterInit(clusterScope.ClusterObj()), "%s", err.Error())
 		clusterScope.Error(err, "failed to reconcile bastion host")
 		return reconcile.Result{}, err
 	}
@@ -364,10 +364,10 @@ func (r *AWSClusterReconciler) reconcileNormal(ctx context.Context, clusterScope
 	}
 	if err := s3Service.ReconcileBucket(ctx); err != nil {
-		conditions.MarkFalse(awsCluster, infrav1beta1.S3BucketReadyCondition, infrav1.S3BucketFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+		v1beta1conditions.MarkFalse(awsCluster, infrav1beta1.S3BucketReadyCondition, infrav1.S3BucketFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile S3 Bucket for AWSCluster %s/%s", awsCluster.Namespace, awsCluster.Name)
 	}
-	conditions.MarkTrue(awsCluster, infrav1beta1.S3BucketReadyCondition)
+	v1beta1conditions.MarkTrue(awsCluster, infrav1beta1.S3BucketReadyCondition)
 	for _, subnet := range clusterScope.Subnets().FilterPrivate() {
 		found := false
@@ -460,21 +460,21 @@ func (r *AWSClusterReconciler) checkForExternalControlPlaneLoadBalancer(clusterS
 	switch {
 	case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0 && awsCluster.Spec.ControlPlaneEndpoint.Port == 0:
 		clusterScope.Info("AWSCluster control plane endpoint is still non-populated")
-		conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return &requeueAfterPeriod
 	case len(awsCluster.Spec.ControlPlaneEndpoint.Host) == 0:
 		clusterScope.Info("AWSCluster control plane endpoint host is still non-populated")
-		conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return &requeueAfterPeriod
 	case awsCluster.Spec.ControlPlaneEndpoint.Port == 0:
clusterScope.Info("AWSCluster control plane endpoint port is still non-populated") - conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(awsCluster, infrav1beta1.LoadBalancerReadyCondition, infrav1beta1.WaitForExternalControlPlaneEndpointReason, clusterv1beta1.ConditionSeverityInfo, "") return &requeueAfterPeriod default: - conditions.MarkTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition) + v1beta1conditions.MarkTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition) return nil } diff --git a/controllers/awsmachine_controller.go b/controllers/awsmachine_controller.go index c9f5546390..cd299c7133 100644 --- a/controllers/awsmachine_controller.go +++ b/controllers/awsmachine_controller.go @@ -64,7 +64,7 @@ import ( clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -337,13 +337,13 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope // all the other errors are blocking. // Because we are reconciling all load balancers, attempt to treat the error as a list of errors. if err = kerrors.FilterOut(err, elb.IsAccessDenied, elb.IsNotFound); err != nil { - conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, errors.Errorf("failed to reconcile LB attachment: %+v", err) } } if machineScope.IsControlPlane() { - conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") } if feature.Gates.Enabled(feature.EventBridgeInstanceState) { @@ -368,7 +368,7 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope machineScope.Info("Terminating EC2 instance", "instance-id", instance.ID) // Set the InstanceReadyCondition and patch the object before the blocking operation - conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := machineScope.PatchObject(); err != nil { machineScope.Error(err, "failed to patch object") return ctrl.Result{}, err @@ -376,11 +376,11 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope if err := ec2Service.TerminateInstance(instance.ID); err != nil { machineScope.Error(err, "failed to terminate instance") - conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, 
"DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedTerminate", "Failed to terminate instance %q: %v", instance.ID, err) return ctrl.Result{}, err } - conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") // If the AWSMachine specifies NetworkStatus Interfaces, detach the cluster's core Security Groups from them as part of deletion. if len(machineScope.AWSMachine.Spec.NetworkInterfaces) > 0 { @@ -396,7 +396,7 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope "instanceID", instance.ID, ) - conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := machineScope.PatchObject(); err != nil { return ctrl.Result{}, err } @@ -404,11 +404,11 @@ func (r *AWSMachineReconciler) reconcileDelete(ctx context.Context, machineScope for _, id := range machineScope.AWSMachine.Spec.NetworkInterfaces { if err := ec2Service.DetachSecurityGroupsFromNetworkInterface(core, id); err != nil { machineScope.Error(err, "failed to detach security groups from instance's network interfaces") - conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return ctrl.Result{}, err } } - conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") } // Release an Elastic IP when the machine has public IP Address (EIP) with a cluster-wide config @@ -480,14 +480,14 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope if !*machineScope.Cluster.Status.Initialization.InfrastructureProvisioned { machineScope.Info("Cluster infrastructure is not ready yet") - conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } // Make sure bootstrap data is available and populated. 
 	if !machineScope.IsMachinePoolMachine() && machineScope.Machine.Spec.Bootstrap.DataSecretName == nil {
 		machineScope.Info("Bootstrap data secret reference is not yet available")
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return ctrl.Result{}, nil
 	}
@@ -497,13 +497,13 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
 	instance, err := r.findInstance(machineScope, ec2svc)
 	if err != nil {
 		machineScope.Error(err, "unable to find instance")
-		conditions.MarkUnknown(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceNotFoundReason, "%s", err.Error())
+		v1beta1conditions.MarkUnknown(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceNotFoundReason, "%s", err.Error())
 		return ctrl.Result{}, err
 	}
 	if instance == nil && machineScope.IsMachinePoolMachine() {
 		err = errors.New("no instance found for machine pool")
 		machineScope.Error(err, "unable to find instance")
-		conditions.MarkUnknown(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceNotFoundReason, "%s", err.Error())
+		v1beta1conditions.MarkUnknown(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceNotFoundReason, "%s", err.Error())
 		return ctrl.Result{}, err
 	}
@@ -519,8 +519,8 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
 	// Create new instance since providerId is nil and instance could not be found by tags.
 	if instance == nil {
 		// Avoid a flickering condition between InstanceProvisionStarted and InstanceProvisionFailed if there's a persistent failure with createInstance
-		if conditions.GetReason(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition) != infrav1beta1.InstanceProvisionFailedReason {
-			conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceProvisionStartedReason, clusterv1beta1.ConditionSeverityInfo, "")
+		if v1beta1conditions.GetReason(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition) != infrav1beta1.InstanceProvisionFailedReason {
+			v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceProvisionStartedReason, clusterv1beta1.ConditionSeverityInfo, "")
 			if patchErr := machineScope.PatchObject(); patchErr != nil {
 				machineScope.Error(patchErr, "failed to patch conditions")
 				return ctrl.Result{}, patchErr
@@ -536,7 +536,7 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
 		instance, err = r.createInstance(ctx, ec2svc, machineScope, clusterScope, objectStoreSvc)
 		if err != nil {
 			machineScope.Error(err, "unable to create instance")
-			conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceProvisionFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+			v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceProvisionFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 			return ctrl.Result{}, err
 		}
 	}
@@ -586,13 +586,13 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
 	case infrav1.InstanceStatePending:
 		machineScope.SetNotReady()
 		shouldRequeue = true
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceNotReadyReason, clusterv1beta1.ConditionSeverityWarning, "")
+		v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceNotReadyReason, clusterv1beta1.ConditionSeverityWarning, "")
 	case infrav1.InstanceStateStopping, infrav1.InstanceStateStopped:
 		machineScope.SetNotReady()
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceStoppedReason, clusterv1beta1.ConditionSeverityError, "")
+		v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceStoppedReason, clusterv1beta1.ConditionSeverityError, "")
 	case infrav1.InstanceStateRunning:
 		machineScope.SetReady()
-		conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition)
+		v1beta1conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition)
 	case infrav1.InstanceStateShuttingDown, infrav1.InstanceStateTerminated:
 		machineScope.SetNotReady()
@@ -601,11 +601,11 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
 			// and therefore should not be reported as error.
 			machineScope.Info("EC2 instance of machine pool was terminated", "state", instance.State, "instance-id", *machineScope.GetInstanceID())
 			r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, infrav1beta1.InstanceTerminatedReason, "EC2 instance termination")
-			conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceTerminatedReason, clusterv1beta1.ConditionSeverityInfo, "")
+			v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceTerminatedReason, clusterv1beta1.ConditionSeverityInfo, "")
 		} else {
 			machineScope.Info("Unexpected EC2 instance termination", "state", instance.State, "instance-id", *machineScope.GetInstanceID())
 			r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InstanceUnexpectedTermination", "Unexpected EC2 instance termination")
-			conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceTerminatedReason, clusterv1beta1.ConditionSeverityError, "")
+			v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, infrav1beta1.InstanceTerminatedReason, clusterv1beta1.ConditionSeverityError, "")
 		}
 	default:
 		machineScope.SetNotReady()
@@ -613,7 +613,7 @@ func (r *AWSMachineReconciler) reconcileNormal(ctx context.Context, machineScope
 		r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "InstanceUnhandledState", "EC2 instance state is undefined")
 		machineScope.SetFailureReason("UpdateError")
 		machineScope.SetFailureMessage(errors.Errorf("EC2 instance state %q is undefined", instance.State))
-		conditions.MarkUnknown(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, "", "")
+		v1beta1conditions.MarkUnknown(machineScope.AWSMachine, infrav1beta1.InstanceReadyCondition, "", "")
 	}
 	// reconcile the deletion of the bootstrap data secret now that we have updated instance state
@@ -683,11 +683,11 @@ func (r *AWSMachineReconciler) reconcileOperationalState(ec2svc services.EC2Inte
 	// Ensure that the security groups are correct.
 	_, err = r.ensureSecurityGroups(ec2svc, machineScope, machineScope.AWSMachine.Spec.AdditionalSecurityGroups, existingSecurityGroups)
 	if err != nil {
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, infrav1beta1.SecurityGroupsFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+		v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition, infrav1beta1.SecurityGroupsFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		machineScope.Error(err, "unable to ensure security groups")
 		return err
 	}
-	conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition)
+	v1beta1conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.SecurityGroupsReadyCondition)
 	err = r.ensureInstanceMetadataOptions(ec2svc, instance, machineScope.AWSMachine)
 	if err != nil {
@@ -1016,12 +1016,12 @@ func (r *AWSMachineReconciler) registerInstanceToClassicLB(ctx context.Context,
 	if err := elbsvc.RegisterInstanceWithAPIServerELB(ctx, i); err != nil {
 		r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with classic load balancer: %v", i.ID, err)
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+		v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return errors.Wrapf(err, "could not register control plane instance %q with classic load balancer", i.ID)
 	}
 	r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB", "Control plane instance %q is registered with classic load balancer", i.ID)
-	conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition)
+	v1beta1conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition)
 	return nil
 }
@@ -1041,19 +1041,19 @@ func (r *AWSMachineReconciler) registerInstanceToV2LB(ctx context.Context, machi
 	if ptr.Deref(machineScope.GetInstanceState(), infrav1.InstanceStatePending) != infrav1.InstanceStateRunning {
 		r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Cannot register control plane instance %q with load balancer: instance is not running", instance.ID)
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityInfo, "instance not running")
+		v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityInfo, "instance not running")
 		return elb.NewInstanceNotRunning("instance is not running")
 	}
 	if err := elbsvc.RegisterInstanceWithAPIServerLB(ctx, instance, lb); err != nil {
 		r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedAttachControlPlaneELB", "Failed to register control plane instance %q with load balancer: %v", instance.ID, err)
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+		v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBAttachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return errors.Wrapf(err, "could not register control plane instance %q with load balancer", instance.ID)
 	}
 	r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeNormal, "SuccessfulAttachControlPlaneELB", "Control plane instance %q is registered with load balancer", instance.ID)
-	conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition)
+	v1beta1conditions.MarkTrue(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition)
 	return nil
 }
@@ -1072,7 +1072,7 @@ func (r *AWSMachineReconciler) deregisterInstanceFromClassicLB(ctx context.Conte
 	if err := elbsvc.DeregisterInstanceFromAPIServerELB(ctx, instance); err != nil {
 		r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB", "Failed to deregister control plane instance %q from load balancer: %v", instance.ID, err)
-		conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBDetachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+		v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBDetachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", instance.ID)
 	}
@@ -1097,7 +1097,7 @@ func (r *AWSMachineReconciler) deregisterInstanceFromV2LB(ctx context.Context, m
 		if err := elbsvc.DeregisterInstanceFromAPIServerLB(ctx, targetGroupArn, i); err != nil {
 			r.Recorder.Eventf(machineScope.AWSMachine, corev1.EventTypeWarning, "FailedDetachControlPlaneELB", "Failed to deregister control plane instance %q from load balancer: %v", i.ID, err)
-			conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBDetachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+			v1beta1conditions.MarkFalse(machineScope.AWSMachine, infrav1beta1.ELBAttachedCondition, infrav1beta1.ELBDetachFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 			return errors.Wrapf(err, "could not deregister control plane instance %q from load balancer", i.ID)
 		}
 	}
diff --git a/controllers/awsmachine_controller_test.go b/controllers/awsmachine_controller_test.go
index b5e3129ac3..2ba61cf758 100644
--- a/controllers/awsmachine_controller_test.go
+++ b/controllers/awsmachine_controller_test.go
@@ -47,7 +47,7 @@ import (
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 func TestAWSMachineReconcilerIntegrationTests(t *testing.T) {
@@ -541,7 +541,7 @@ type conditionAssertion struct {
 func expectConditions(g *WithT, m *infrav1.AWSMachine, expected []conditionAssertion) {
 	g.Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected)), "number of conditions")
 	for _, c := range expected {
-		actual := conditions.Get(m, c.conditionType)
+		actual := v1beta1conditions.Get(m, c.conditionType)
 		g.Expect(actual).To(Not(BeNil()))
 		g.Expect(actual.Type).To(Equal(c.conditionType))
 		g.Expect(actual.Status).To(Equal(c.status))
diff --git a/controllers/helpers_test.go b/controllers/helpers_test.go
index 5a7d53f46c..d964eee194 100644
--- a/controllers/helpers_test.go
+++ b/controllers/helpers_test.go
@@ -37,7 +37,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/helpers"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 const DNSName = "www.google.com"
@@ -127,7 +127,7 @@ var (
 func expectAWSClusterConditions(g *WithT, m *infrav1.AWSCluster, expected []conditionAssertion) {
 	g.Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected)), "number of conditions")
 	for _, c := range expected {
-		actual := conditions.Get(m, c.conditionType)
+		actual := v1beta1conditions.Get(m, c.conditionType)
 		g.Expect(actual).To(Not(BeNil()))
 		g.Expect(actual.Type).To(Equal(c.conditionType))
 		g.Expect(actual.Status).To(Equal(c.status))
diff --git a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go
index e7b2e14485..83b454b285 100644
--- a/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go
+++ b/controlplane/eks/controllers/awsmanagedcontrolplane_controller.go
@@ -58,7 +58,7 @@ import (
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -287,7 +287,7 @@ func (r *AWSManagedControlPlaneReconciler) Reconcile(ctx context.Context, req ct
 		}
 	}
-	conditions.SetSummary(managedScope.ControlPlane, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter())
+	v1beta1conditions.SetSummary(managedScope.ControlPlane, v1beta1conditions.WithConditions(applicableConditions...), v1beta1conditions.WithStepCounter())
 	if err := managedScope.Close(); err != nil && reterr == nil {
 		reterr = err
@@ -343,12 +343,12 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context,
 	}
 	if err := sgService.ReconcileSecurityGroups(); err != nil {
-		conditions.MarkFalse(awsManagedControlPlane, infrav1beta1.ClusterSecurityGroupsReadyCondition, infrav1beta1.ClusterSecurityGroupReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+		v1beta1conditions.MarkFalse(awsManagedControlPlane, infrav1beta1.ClusterSecurityGroupsReadyCondition, infrav1beta1.ClusterSecurityGroupReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile general security groups for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name)
 	}
 	if err := ec2Service.ReconcileBastion(); err != nil {
-		conditions.MarkFalse(awsManagedControlPlane, infrav1beta1.BastionHostReadyCondition, infrav1beta1.BastionHostFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+		v1beta1conditions.MarkFalse(awsManagedControlPlane, infrav1beta1.BastionHostReadyCondition, infrav1beta1.BastionHostFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return reconcile.Result{}, fmt.Errorf("failed to reconcile bastion host for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err)
 	}
@@ -357,7 +357,7 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context,
 	}
 	if err := awsnodeService.ReconcileCNI(ctx); err != nil {
-		conditions.MarkFalse(managedScope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, infrav1beta1.SecondaryCidrReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+		v1beta1conditions.MarkFalse(managedScope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, infrav1beta1.SecondaryCidrReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return reconcile.Result{}, fmt.Errorf("failed to reconcile control plane for AWSManagedControlPlane %s/%s: %w", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name, err)
 	}
@@ -373,10 +373,10 @@ func (r *AWSManagedControlPlaneReconciler) reconcileNormal(ctx context.Context,
 		}
 	}
 	if err := authService.ReconcileIAMAuthenticator(ctx); err != nil {
-		conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1beta1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+		v1beta1conditions.MarkFalse(awsManagedControlPlane, ekscontrolplanev1beta1.IAMAuthenticatorConfiguredCondition, ekscontrolplanev1.IAMAuthenticatorConfigurationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 		return reconcile.Result{}, errors.Wrapf(err, "failed to reconcile aws-iam-authenticator config for AWSManagedControlPlane %s/%s", awsManagedControlPlane.Namespace, awsManagedControlPlane.Name)
 	}
-	conditions.MarkTrue(awsManagedControlPlane, ekscontrolplanev1beta1.IAMAuthenticatorConfiguredCondition)
+	v1beta1conditions.MarkTrue(awsManagedControlPlane, ekscontrolplanev1beta1.IAMAuthenticatorConfiguredCondition)
 	for _, subnet := range managedScope.Subnets().FilterPrivate() {
 		managedScope.SetFailureDomain(subnet.AvailabilityZone, clusterv1.FailureDomain{
diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller.go b/controlplane/rosa/controllers/rosacontrolplane_controller.go
index c0a311720b..a5f0587045 100644
--- a/controlplane/rosa/controllers/rosacontrolplane_controller.go
+++ b/controlplane/rosa/controllers/rosacontrolplane_controller.go
@@ -70,7 +70,7 @@ import (
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/kubeconfig"
 	"sigs.k8s.io/cluster-api/util/predicates"
 	"sigs.k8s.io/cluster-api/util/secret"
@@ -241,9 +241,9 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc
 		return ctrl.Result{}, fmt.Errorf("failed to validate ROSAControlPlane.spec: %w", err)
 	}
-	conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSAControlPlaneValidCondition)
+	v1beta1conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSAControlPlaneValidCondition)
 	if validationMessage != "" {
-		conditions.MarkFalse(rosaScope.ControlPlane,
+		v1beta1conditions.MarkFalse(rosaScope.ControlPlane,
 			rosacontrolplanev1beta1.ROSAControlPlaneValidCondition,
 			rosacontrolplanev1beta1.ROSAControlPlaneInvalidConfigurationReason,
 			clusterv1beta1.ConditionSeverityError,
@@ -268,7 +268,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc
 	switch cluster.Status().State() {
 	case cmv1.ClusterStateReady:
-		conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition)
+		v1beta1conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition)
 		rosaScope.ControlPlane.Status.Ready = true
 		apiEndpoint, err := buildAPIEndpoint(cluster)
@@ -301,7 +301,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc
 		errorMessage := cluster.Status().ProvisionErrorMessage()
 		rosaScope.ControlPlane.Status.FailureMessage = &errorMessage
-		conditions.MarkFalse(rosaScope.ControlPlane,
+		v1beta1conditions.MarkFalse(rosaScope.ControlPlane,
 			rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition,
 			string(cluster.Status().State()),
 			clusterv1beta1.ConditionSeverityError,
@@ -311,7 +311,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc
 		return ctrl.Result{}, nil
 	}
-	conditions.MarkFalse(rosaScope.ControlPlane,
+	v1beta1conditions.MarkFalse(rosaScope.ControlPlane,
 		rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition,
 		string(cluster.Status().State()),
 		clusterv1beta1.ConditionSeverityInfo,
@@ -337,7 +337,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc
 	}
 	// Is the referenced ROSANetwork ready yet?
-	if !conditions.IsTrue(rosaNet, expinfrav1beta1.ROSANetworkReadyCondition) {
+	if !v1beta1conditions.IsTrue(rosaNet, expinfrav1beta1.ROSANetworkReadyCondition) {
 		rosaScope.Info(fmt.Sprintf("referenced ROSANetwork %s is not ready", rosaNet.Name))
 		return ctrl.Result{RequeueAfter: time.Minute}, nil
 	}
@@ -350,7 +350,7 @@ func (r *ROSAControlPlaneReconciler) reconcileNormal(ctx context.Context, rosaSc
 	cluster, err = ocmClient.CreateCluster(ocmClusterSpec)
 	if err != nil {
-		conditions.MarkFalse(rosaScope.ControlPlane,
+		v1beta1conditions.MarkFalse(rosaScope.ControlPlane,
 			rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition,
 			rosacontrolplanev1beta1.ReconciliationFailedReason,
 			clusterv1beta1.ConditionSeverityError,
@@ -376,7 +376,7 @@ func (r *ROSAControlPlaneReconciler) reconcileRosaRoleConfig(ctx context.Context
 	}
 	if err := r.Client.Get(ctx, key, rosaRoleConfig); err != nil {
-		conditions.MarkFalse(rosaScope.ControlPlane,
+		v1beta1conditions.MarkFalse(rosaScope.ControlPlane,
 			rosacontrolplanev1beta1.ROSARoleConfigReadyCondition,
 			rosacontrolplanev1beta1.ROSARoleConfigNotFoundReason,
 			clusterv1beta1.ConditionSeverityError,
@@ -386,8 +386,8 @@ func (r *ROSAControlPlaneReconciler) reconcileRosaRoleConfig(ctx context.Context
 		}
 		// Check if RosaRoleConfig is ready
-		if !conditions.IsTrue(rosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition) {
-			conditions.MarkFalse(rosaScope.ControlPlane,
+		if !v1beta1conditions.IsTrue(rosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition) {
+			v1beta1conditions.MarkFalse(rosaScope.ControlPlane,
 				rosacontrolplanev1beta1.ROSARoleConfigReadyCondition,
 				rosacontrolplanev1beta1.ROSARoleConfigNotReadyReason,
 				clusterv1beta1.ConditionSeverityWarning,
@@ -395,7 +395,7 @@ func (r *ROSAControlPlaneReconciler) reconcileRosaRoleConfig(ctx context.Context
 			return nil, fmt.Errorf("RosaRoleConfig %s/%s is not ready", rosaScope.ControlPlane.Namespace, rosaScope.ControlPlane.Spec.RosaRoleConfigRef.Name)
 		}
-		conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSARoleConfigReadyCondition)
+		v1beta1conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSARoleConfigReadyCondition)
 	} else {
 		rosaRoleConfig.Status.OIDCID = rosaScope.ControlPlane.Spec.OIDCID
 		rosaRoleConfig.Status.AccountRolesRef.InstallerRoleARN = rosaScope.ControlPlane.Spec.InstallerRoleARN
@@ -453,7 +453,7 @@ func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaSc
 	if cluster.Status().State() != cmv1.ClusterStateUninstalling {
 		if _, err := ocmClient.DeleteCluster(cluster.ID(), bestEffort, creator); err != nil {
-			conditions.MarkFalse(rosaScope.ControlPlane,
+			v1beta1conditions.MarkFalse(rosaScope.ControlPlane,
 				rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition,
 				rosacontrolplanev1beta1.ROSAControlPlaneDeletionFailedReason,
 				clusterv1beta1.ConditionSeverityError,
@@ -464,7 +464,7 @@ func (r *ROSAControlPlaneReconciler) reconcileDelete(ctx context.Context, rosaSc
 		}
 	}
-	conditions.MarkFalse(rosaScope.ControlPlane,
+	v1beta1conditions.MarkFalse(rosaScope.ControlPlane,
 		rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition,
 		string(cluster.Status().State()),
 		clusterv1beta1.ConditionSeverityInfo,
@@ -511,7 +511,7 @@ func (r *ROSAControlPlaneReconciler) deleteMachinePools(ctx context.Context, ros
 func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.ROSAControlPlaneScope, ocmClient rosa.OCMClient, cluster *cmv1.Cluster) error {
 	version := rosaScope.ControlPlane.Spec.Version
 	if version == rosa.RawVersionID(cluster.Version()) {
-		conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSAControlPlaneUpgradingCondition, "upgraded", clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(rosaScope.ControlPlane, rosacontrolplanev1beta1.ROSAControlPlaneUpgradingCondition, "upgraded", clusterv1beta1.ConditionSeverityInfo, "")
 		if cluster.Version() != nil {
 			rosaScope.ControlPlane.Status.AvailableUpgrades = cluster.Version().AvailableUpgrades()
@@ -541,7 +541,7 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.RO
 			Reason: "failed",
 			Message: fmt.Sprintf("failed to schedule upgrade to version %s: %v", version, err),
 		}
-		conditions.Set(rosaScope.ControlPlane, condition)
+		v1beta1conditions.Set(rosaScope.ControlPlane, condition)
 		return err
 	}
@@ -553,7 +553,7 @@ func (r *ROSAControlPlaneReconciler) reconcileClusterVersion(rosaScope *scope.RO
 		Reason: string(scheduledUpgrade.State().Value()),
 		Message: fmt.Sprintf("Upgrading to version %s", scheduledUpgrade.Version()),
 	}
-	conditions.Set(rosaScope.ControlPlane, condition)
+	v1beta1conditions.Set(rosaScope.ControlPlane, condition)
 	// if cluster is already upgrading to another version we need to wait until the current upgrade is finished, return an error to requeue and try later.
 	if scheduledUpgrade.Version() != version {
@@ -570,7 +570,7 @@ func (r *ROSAControlPlaneReconciler) updateOCMCluster(rosaScope *scope.ROSAContr
 	// Update the cluster.
 	rosaScope.Info("Updating cluster")
 	if err := ocmClient.UpdateCluster(cluster.ID(), creator, ocmClusterSpec); err != nil {
-		conditions.MarkFalse(rosaScope.ControlPlane,
+		v1beta1conditions.MarkFalse(rosaScope.ControlPlane,
 			rosacontrolplanev1beta1.ROSAControlPlaneValidCondition,
 			rosacontrolplanev1beta1.ROSAControlPlaneInvalidConfigurationReason,
 			clusterv1beta1.ConditionSeverityError,
@@ -674,14 +674,14 @@ func (r *ROSAControlPlaneReconciler) reconcileExternalAuth(ctx context.Context,
 	var errs []error
 	if err := r.reconcileExternalAuthProviders(ctx, externalAuthClient, rosaScope, cluster); err != nil {
 		errs = append(errs, err)
-		conditions.MarkFalse(rosaScope.ControlPlane,
+		v1beta1conditions.MarkFalse(rosaScope.ControlPlane,
 			rosacontrolplanev1beta1.ExternalAuthConfiguredCondition,
 			rosacontrolplanev1beta1.ReconciliationFailedReason,
 			clusterv1beta1.ConditionSeverityError,
 			"%s", err.Error())
 	} else {
-		conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ExternalAuthConfiguredCondition)
+		v1beta1conditions.MarkTrue(rosaScope.ControlPlane, rosacontrolplanev1beta1.ExternalAuthConfiguredCondition)
 	}
 	if err := r.reconcileExternalAuthBootstrapKubeconfig(ctx, externalAuthClient, rosaScope, cluster); err != nil {
diff --git a/controlplane/rosa/controllers/rosacontrolplane_controller_test.go b/controlplane/rosa/controllers/rosacontrolplane_controller_test.go
index ac64218421..ea765baeac 100644
--- a/controlplane/rosa/controllers/rosacontrolplane_controller_test.go
+++ b/controlplane/rosa/controllers/rosacontrolplane_controller_test.go
@@ -55,7 +55,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 )
@@ -517,7 +517,7 @@ func TestRosaControlPlaneReconcileStatusVersion(t *testing.T) {
 	key := client.ObjectKey{Name: rosaControlPlane.Name, Namespace: rosaControlPlane.Namespace}
 	errGet := testEnv.Get(ctx, key, cp)
 	g.Expect(errGet).NotTo(HaveOccurred())
-	oldCondition := conditions.Get(cp, clusterv1beta1.PausedV1Beta2Condition)
+	oldCondition := v1beta1conditions.Get(cp, clusterv1beta1.PausedV1Beta2Condition)
 	g.Expect(oldCondition).NotTo(BeNil())
 	r := ROSAControlPlaneReconciler{
diff --git a/exp/controllers/awsfargatepool_controller.go b/exp/controllers/awsfargatepool_controller.go
index 1f98399741..e736dffab0 100644
--- a/exp/controllers/awsfargatepool_controller.go
+++ b/exp/controllers/awsfargatepool_controller.go
@@ -39,7 +39,7 @@ import (
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -119,7 +119,7 @@ func (r *AWSFargateProfileReconciler) Reconcile(ctx context.Context, req ctrl.Re
 			expinfrav1beta1.EKSFargateProfileReadyCondition,
 		}
-		conditions.SetSummary(fargateProfileScope.FargateProfile, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter())
+		v1beta1conditions.SetSummary(fargateProfileScope.FargateProfile, v1beta1conditions.WithConditions(applicableConditions...), v1beta1conditions.WithStepCounter())
 		if err := fargateProfileScope.Close(); err != nil && reterr == nil {
 			reterr = err
@@ -128,7 +128,7 @@ func (r *AWSFargateProfileReconciler) Reconcile(ctx context.Context, req ctrl.Re
 	if !controlPlane.Status.Ready {
 		log.Info("Control plane is not ready yet")
-		conditions.MarkFalse(fargateProfile, clusterv1.ReadyCondition, expinfrav1beta1.WaitingForEKSControlPlaneReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(fargateProfile, clusterv1.ReadyCondition, expinfrav1beta1.WaitingForEKSControlPlaneReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return ctrl.Result{}, nil
 	}
diff --git a/exp/controllers/awsmachinepool_controller.go b/exp/controllers/awsmachinepool_controller.go
index ef40e218cf..68cffeafe6 100644
--- a/exp/controllers/awsmachinepool_controller.go
+++ b/exp/controllers/awsmachinepool_controller.go
@@ -59,7 +59,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -184,12 +184,12 @@ func (r *AWSMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	// Always close the scope when exiting this function so we can persist any AWSMachine changes.
 	defer func() {
 		// set Ready condition before AWSMachinePool is patched
-		conditions.SetSummary(machinePoolScope.AWSMachinePool,
-			conditions.WithConditions(
+		v1beta1conditions.SetSummary(machinePoolScope.AWSMachinePool,
+			v1beta1conditions.WithConditions(
 				expinfrav1beta1.ASGReadyCondition,
 				expinfrav1beta1.LaunchTemplateReadyCondition,
 			),
-			conditions.WithStepCounterIfOnly(
+			v1beta1conditions.WithStepCounterIfOnly(
 				expinfrav1beta1.ASGReadyCondition,
 				expinfrav1beta1.LaunchTemplateReadyCondition,
 			),
@@ -282,14 +282,14 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP
 	if !*machinePoolScope.Cluster.Status.Initialization.InfrastructureProvisioned {
 		machinePoolScope.Info("Cluster infrastructure is not ready yet")
-		conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, infrav1beta1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, infrav1beta1.WaitingForClusterInfrastructureReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return ctrl.Result{}, nil
 	}
 	// Make sure bootstrap data is available and populated
 	if machinePoolScope.MachinePool.Spec.Template.Spec.Bootstrap.DataSecretName == nil {
 		machinePoolScope.Info("Bootstrap data secret reference is not yet available")
-		conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, infrav1beta1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, infrav1beta1.WaitingForBootstrapDataReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return ctrl.Result{}, nil
 	}
@@ -301,7 +301,7 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP
 	// Find existing ASG
 	asg, err := r.findASG(machinePoolScope, asgsvc)
 	if err != nil {
-		conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, expinfrav1beta1.ASGNotFoundReason, "%s", err.Error())
+		v1beta1conditions.MarkUnknown(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, expinfrav1beta1.ASGNotFoundReason, "%s", err.Error())
 		return ctrl.Result{}, err
 	}
@@ -354,12 +354,12 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP
 	}
 	// set the LaunchTemplateReady condition
-	conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1beta1.LaunchTemplateReadyCondition)
+	v1beta1conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1beta1.LaunchTemplateReadyCondition)
 	if asg == nil {
 		// Create new ASG
 		if err := r.createPool(machinePoolScope, clusterScope); err != nil {
-			conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, expinfrav1beta1.ASGProvisionFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
+			v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, expinfrav1beta1.ASGProvisionFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error())
 			return ctrl.Result{}, err
 		}
 		return ctrl.Result{
@@ -375,13 +375,13 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP
 		if err := createAWSMachinesIfNotExists(ctx, awsMachineList, machinePoolScope.MachinePool, &machinePoolScope.AWSMachinePool.ObjectMeta, &machinePoolScope.AWSMachinePool.TypeMeta, asg, machinePoolScope.GetLogger(), r.Client, ec2Svc); err != nil {
 			machinePoolScope.SetNotReady()
-			conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineCreationFailed, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+			v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineCreationFailed, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 			return ctrl.Result{}, fmt.Errorf("failed to create awsmachines: %w", err)
 		}
 		if err := deleteOrphanedAWSMachines(ctx, awsMachineList, asg, machinePoolScope.GetLogger(), r.Client); err != nil {
 			machinePoolScope.SetNotReady()
-			conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineDeletionFailed, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+			v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, clusterv1.ReadyCondition, expinfrav1.AWSMachineDeletionFailed, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 			return ctrl.Result{}, fmt.Errorf("failed to clean up awsmachines: %w", err)
 		}
 	}
@@ -439,7 +439,7 @@ func (r *AWSMachinePoolReconciler) reconcileNormal(ctx context.Context, machineP
 	machinePoolScope.AWSMachinePool.Spec.ProviderIDList = providerIDList
 	machinePoolScope.AWSMachinePool.Status.Replicas = int32(len(providerIDList)) //#nosec G115
 	machinePoolScope.AWSMachinePool.Status.Ready = true
-	conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition)
+	v1beta1conditions.MarkTrue(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition)
 	err = machinePoolScope.UpdateInstanceStatuses(ctx, asg.Instances)
 	if err != nil {
@@ -485,7 +485,7 @@ func (r *AWSMachinePoolReconciler) reconcileDelete(ctx context.Context, machineP
 	case expinfrav1.ASGStatusDeleteInProgress:
 		// ASG is already deleting
 		machinePoolScope.SetNotReady()
-		conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, expinfrav1.ASGDeletionInProgress, clusterv1beta1.ConditionSeverityWarning, "")
+		v1beta1conditions.MarkFalse(machinePoolScope.AWSMachinePool, expinfrav1beta1.ASGReadyCondition, expinfrav1.ASGDeletionInProgress, clusterv1beta1.ConditionSeverityWarning, "")
 		r.Recorder.Eventf(machinePoolScope.AWSMachinePool, corev1.EventTypeWarning, "DeletionInProgress", "ASG deletion in progress: %q", asg.Name)
 		machinePoolScope.Info("ASG is already deleting", "name", asg.Name)
 	default:
diff --git a/exp/controllers/awsmachinepool_controller_test.go b/exp/controllers/awsmachinepool_controller_test.go
index 11d183dcf8..4b9807c22f 100644
--- a/exp/controllers/awsmachinepool_controller_test.go
+++ b/exp/controllers/awsmachinepool_controller_test.go
@@ -59,7 +59,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/labels/format"
 	"sigs.k8s.io/cluster-api/util/patch"
 )
@@ -1392,7 +1392,7 @@ type conditionAssertion struct {
 func expectConditions(g *WithT, m *expinfrav1.AWSMachinePool, expected []conditionAssertion) {
 	g.Expect(len(m.Status.Conditions)).To(BeNumerically(">=", len(expected)), "number of conditions")
 	for _, c := range expected {
-		actual := conditions.Get(m, c.conditionType)
+		actual := v1beta1conditions.Get(m, c.conditionType)
 		g.Expect(actual).To(Not(BeNil()))
 		g.Expect(actual.Type).To(Equal(c.conditionType))
 		g.Expect(actual.Status).To(Equal(c.status))
diff --git a/exp/controllers/awsmanagedmachinepool_controller.go b/exp/controllers/awsmanagedmachinepool_controller.go
index 65302fac4f..dd856f7811 100644
--- a/exp/controllers/awsmanagedmachinepool_controller.go
+++ b/exp/controllers/awsmanagedmachinepool_controller.go
@@ -48,7 +48,7 @@ import (
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -153,7 +153,7 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr
 	if !controlPlane.Status.Ready {
 		log.Info("Control plane is not ready yet")
-		conditions.MarkFalse(awsPool, expinfrav1beta1.EKSNodegroupReadyCondition, expinfrav1beta1.WaitingForEKSControlPlaneReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(awsPool, expinfrav1beta1.EKSNodegroupReadyCondition, expinfrav1beta1.WaitingForEKSControlPlaneReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return ctrl.Result{}, nil
 	}
@@ -181,7 +181,7 @@ func (r *AWSManagedMachinePoolReconciler) Reconcile(ctx context.Context, req ctr
 			expinfrav1beta1.LaunchTemplateReadyCondition,
 		}
-		conditions.SetSummary(machinePoolScope.ManagedMachinePool, conditions.WithConditions(applicableConditions...), conditions.WithStepCounter())
+		v1beta1conditions.SetSummary(machinePoolScope.ManagedMachinePool, v1beta1conditions.WithConditions(applicableConditions...), v1beta1conditions.WithStepCounter())
 		if err := machinePoolScope.Close(); err != nil && reterr == nil {
 			reterr = err
@@ -228,7 +228,7 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal(
 		if err != nil {
 			r.Recorder.Eventf(machinePoolScope.ManagedMachinePool, corev1.EventTypeWarning, "FailedLaunchTemplateReconcile", "Failed to reconcile launch template: %v", err)
 			machinePoolScope.Error(err, "failed to reconcile launch template")
-			conditions.MarkFalse(machinePoolScope.ManagedMachinePool, expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "")
+			v1beta1conditions.MarkFalse(machinePoolScope.ManagedMachinePool, expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "")
 			return ctrl.Result{}, err
 		}
 		if res != nil {
@@ -245,7 +245,7 @@ func (r *AWSManagedMachinePoolReconciler) reconcileNormal(
 		}
 		// set the LaunchTemplateReady condition
-		conditions.MarkTrue(machinePoolScope.ManagedMachinePool, expinfrav1beta1.LaunchTemplateReadyCondition)
+		v1beta1conditions.MarkTrue(machinePoolScope.ManagedMachinePool, expinfrav1beta1.LaunchTemplateReadyCondition)
 	}
 	if err := ekssvc.ReconcilePool(ctx); err != nil {
diff --git a/exp/controllers/rosamachinepool_controller.go b/exp/controllers/rosamachinepool_controller.go
index 464222166a..957e93c9dd 100644
--- a/exp/controllers/rosamachinepool_controller.go
+++ b/exp/controllers/rosamachinepool_controller.go
@@ -42,7 +42,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -179,7 +179,7 @@ func (r *ROSAMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Requ
 	}
 	defer func() {
-		conditions.SetSummary(machinePoolScope.RosaMachinePool, conditions.WithConditions(expinfrav1beta1.RosaMachinePoolReadyCondition), conditions.WithStepCounter())
+		v1beta1conditions.SetSummary(machinePoolScope.RosaMachinePool, v1beta1conditions.WithConditions(expinfrav1beta1.RosaMachinePoolReadyCondition), v1beta1conditions.WithStepCounter())
 		if err := machinePoolScope.Close(); err != nil && reterr == nil {
 			reterr = err
@@ -271,7 +271,7 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context,
 		rosaMachinePool.Status.Replicas = currentReplicas
 		if rosa.IsNodePoolReady(nodePool) {
-			conditions.MarkTrue(rosaMachinePool, expinfrav1beta1.RosaMachinePoolReadyCondition)
+			v1beta1conditions.MarkTrue(rosaMachinePool, expinfrav1beta1.RosaMachinePoolReadyCondition)
 			rosaMachinePool.Status.Ready = true
 			if err := r.reconcileMachinePoolVersion(machinePoolScope, ocmClient, nodePool); err != nil {
@@ -281,7 +281,7 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context,
 			return ctrl.Result{}, nil
 		}
-		conditions.MarkFalse(rosaMachinePool,
+		v1beta1conditions.MarkFalse(rosaMachinePool,
 			expinfrav1beta1.RosaMachinePoolReadyCondition,
 			nodePool.Status().Message(),
 			clusterv1beta1.ConditionSeverityInfo,
@@ -300,7 +300,7 @@ func (r *ROSAMachinePoolReconciler) reconcileNormal(ctx context.Context,
 	nodePool, err = ocmClient.CreateNodePool(machinePoolScope.ControlPlane.Status.ID, nodePoolSpec)
 	if err != nil {
-		conditions.MarkFalse(rosaMachinePool,
+		v1beta1conditions.MarkFalse(rosaMachinePool,
 			expinfrav1beta1.RosaMachinePoolReadyCondition,
 			expinfrav1beta1.RosaMachinePoolReconciliationFailedReason,
 			clusterv1beta1.ConditionSeverityError,
@@ -344,7 +344,7 @@ func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope
 	version := machinePoolScope.RosaMachinePool.Spec.Version
 	if version == "" || version == rosa.RawVersionID(nodePool.Version()) {
 		machinePoolScope.RosaMachinePool.Status.AvailableUpgrades = nodePool.Version().AvailableUpgrades()
-		conditions.MarkFalse(machinePoolScope.RosaMachinePool, expinfrav1beta1.RosaMachinePoolUpgradingCondition, "upgraded", clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(machinePoolScope.RosaMachinePool, expinfrav1beta1.RosaMachinePoolUpgradingCondition, "upgraded", clusterv1beta1.ConditionSeverityInfo, "")
 		return nil
 	}
@@ -367,7 +367,7 @@ func (r *ROSAMachinePoolReconciler) reconcileMachinePoolVersion(machinePoolScope
 		Reason: string(scheduledUpgrade.State().Value()),
 		Message: fmt.Sprintf("Upgrading to version %s", scheduledUpgrade.Version()),
 	}
-	conditions.Set(machinePoolScope.RosaMachinePool, condition)
+	v1beta1conditions.Set(machinePoolScope.RosaMachinePool, condition)
 	// if nodePool is already upgrading to another version we need to wait until the current upgrade is finished, return an error to requeue and try later.
 	if scheduledUpgrade.Version() != version {
@@ -412,7 +412,7 @@ func (r *ROSAMachinePoolReconciler) updateNodePool(machinePoolScope *scope.RosaM
 	updatedNodePool, err := ocmClient.UpdateNodePool(machinePoolScope.ControlPlane.Status.ID, nodePoolSpec)
 	if err != nil {
-		conditions.MarkFalse(machinePoolScope.RosaMachinePool,
+		v1beta1conditions.MarkFalse(machinePoolScope.RosaMachinePool,
 			expinfrav1beta1.RosaMachinePoolReadyCondition,
 			expinfrav1beta1.RosaMachinePoolReconciliationFailedReason,
 			clusterv1beta1.ConditionSeverityError,
diff --git a/exp/controllers/rosanetwork_controller.go b/exp/controllers/rosanetwork_controller.go
index 8d6c5824ff..0889cab4ab 100644
--- a/exp/controllers/rosanetwork_controller.go
+++ b/exp/controllers/rosanetwork_controller.go
@@ -45,7 +45,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/scope"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -151,7 +151,7 @@ func (r *ROSANetworkReconciler) reconcileNormal(ctx context.Context, rosaNetScop
 		// Call the AWS CF stack create API
 		_, err := r.awsClient.CreateStackWithParamsTags(ctx, templateBody, rosaNetScope.ROSANetwork.Spec.StackName, cfParams, rosaNetScope.ROSANetwork.Spec.StackTags)
 		if err != nil {
-			conditions.MarkFalse(rosaNetScope.ROSANetwork,
+			v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
 				expinfrav1beta1.ROSANetworkReadyCondition,
 				expinfrav1beta1.ROSANetworkFailedReason,
 				clusterv1beta1.ConditionSeverityError,
@@ -159,7 +159,7 @@ func (r *ROSANetworkReconciler) reconcileNormal(ctx context.Context, rosaNetScop
 				err.Error())
 			return ctrl.Result{}, fmt.Errorf("failed to start CF stack creation: %w", err)
 		}
-		conditions.MarkFalse(rosaNetScope.ROSANetwork,
+		v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
 			expinfrav1beta1.ROSANetworkReadyCondition,
 			expinfrav1beta1.ROSANetworkCreatingReason,
 			clusterv1beta1.ConditionSeverityInfo,
@@ -175,7 +175,7 @@ func (r *ROSANetworkReconciler) reconcileNormal(ctx context.Context, rosaNetScop
 	switch r.cfStack.StackStatus {
 	case cloudformationtypes.StackStatusCreateInProgress: // Create in progress
 		// Set the reason of false ROSANetworkReadyCondition to Creating
-		conditions.MarkFalse(rosaNetScope.ROSANetwork,
+		v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
 			expinfrav1beta1.ROSANetworkReadyCondition,
 			expinfrav1beta1.ROSANetworkCreatingReason,
 			clusterv1beta1.ConditionSeverityInfo,
@@ -187,8 +187,8 @@ func (r *ROSANetworkReconciler) reconcileNormal(ctx context.Context, rosaNetScop
 		}
 		// Set the reason of true ROSANetworkReadyCondition to Created
-		// We have to use conditions.Set(), since conditions.MarkTrue() does not support setting reason
-		conditions.Set(rosaNetScope.ROSANetwork,
+		// We have to use v1beta1conditions.Set(), since v1beta1conditions.MarkTrue() does not support setting reason
+		v1beta1conditions.Set(rosaNetScope.ROSANetwork,
 			&clusterv1beta1.Condition{
 				Type: expinfrav1beta1.ROSANetworkReadyCondition,
 				Status: corev1.ConditionTrue,
@@ -198,7 +198,7 @@ func (r *ROSANetworkReconciler) reconcileNormal(ctx context.Context, rosaNetScop
 		return ctrl.Result{}, nil
 	case cloudformationtypes.StackStatusCreateFailed: // Create failed
 		// Set the reason of false ROSANetworkReadyCondition to Failed
-		conditions.MarkFalse(rosaNetScope.ROSANetwork,
+		v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
 			expinfrav1beta1.ROSANetworkReadyCondition,
 			expinfrav1beta1.ROSANetworkFailedReason,
 			clusterv1beta1.ConditionSeverityError,
@@ -222,7 +222,7 @@ func (r *ROSANetworkReconciler) reconcileDelete(ctx context.Context, rosaNetScop
 	case cloudformationtypes.StackStatusDeleteInProgress: // Deletion in progress
 		return ctrl.Result{RequeueAfter: time.Second * 60}, nil
 	case cloudformationtypes.StackStatusDeleteFailed: // Deletion failed
-		conditions.MarkFalse(rosaNetScope.ROSANetwork,
+		v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
 			expinfrav1beta1.ROSANetworkReadyCondition,
 			expinfrav1beta1.ROSANetworkDeletionFailedReason,
 			clusterv1beta1.ConditionSeverityError,
@@ -231,7 +231,7 @@ func (r *ROSANetworkReconciler) reconcileDelete(ctx context.Context, rosaNetScop
 	default: // All the other states
 		err := r.awsClient.DeleteCFStack(ctx, rosaNetScope.ROSANetwork.Spec.StackName)
 		if err != nil {
-			conditions.MarkFalse(rosaNetScope.ROSANetwork,
+			v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
 				expinfrav1beta1.ROSANetworkReadyCondition,
 				expinfrav1beta1.ROSANetworkDeletionFailedReason,
 				clusterv1beta1.ConditionSeverityError,
@@ -239,7 +239,7 @@ func (r *ROSANetworkReconciler) reconcileDelete(ctx context.Context, rosaNetScop
 				err.Error())
 			return ctrl.Result{}, fmt.Errorf("failed to start CF stack deletion: %w", err)
 		}
-		conditions.MarkFalse(rosaNetScope.ROSANetwork,
+		v1beta1conditions.MarkFalse(rosaNetScope.ROSANetwork,
 			expinfrav1beta1.ROSANetworkReadyCondition,
 			expinfrav1beta1.ROSANetworkDeletingReason,
 			clusterv1beta1.ConditionSeverityInfo,
diff --git a/exp/controllers/rosanetwork_controller_test.go b/exp/controllers/rosanetwork_controller_test.go
index 8384ad170f..1691675fb7 100644
--- a/exp/controllers/rosanetwork_controller_test.go
+++ b/exp/controllers/rosanetwork_controller_test.go
@@ -41,7 +41,7 @@ import (
 	expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 func TestROSANetworkReconciler_Reconcile(t *testing.T) {
@@ -696,5 +696,5 @@ func getROSANetworkReadyCondition(reconciler *ROSANetworkReconciler, rosaNet *ex
 		return nil, err
 	}
-	return conditions.Get(updatedROSANetwork, expinfrav1beta1.ROSANetworkReadyCondition), nil
+	return v1beta1conditions.Get(updatedROSANetwork, expinfrav1beta1.ROSANetworkReadyCondition), nil
 }
diff --git a/exp/controllers/rosaroleconfig_controller.go b/exp/controllers/rosaroleconfig_controller.go
index 150642206c..5f8af158ba 100644
--- a/exp/controllers/rosaroleconfig_controller.go
+++ b/exp/controllers/rosaroleconfig_controller.go
@@ -53,7 +53,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/rosa"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	"sigs.k8s.io/cluster-api/util/predicates"
 )
@@ -110,7 +110,7 @@ func (r *ROSARoleConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	// Always close the scope and set summary condition
 	defer func() {
-		conditions.SetSummary(scope.RosaRoleConfig, conditions.WithConditions(expinfrav1beta1.RosaRoleConfigReadyCondition), conditions.WithStepCounter())
+		v1beta1conditions.SetSummary(scope.RosaRoleConfig, v1beta1conditions.WithConditions(expinfrav1beta1.RosaRoleConfigReadyCondition), v1beta1conditions.WithStepCounter())
 		if err := scope.PatchObject(); err != nil {
 			reterr = errors.Join(reterr, err)
 		}
@@ -122,7 +122,7 @@ func (r *ROSARoleConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	if !roleConfig.DeletionTimestamp.IsZero() {
 		scope.Info("Deleting ROSARoleConfig.")
-		conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionStarted, clusterv1beta1.ConditionSeverityInfo, "Deletion of RosaRolesConfig started")
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1.RosaRoleConfigDeletionStarted, clusterv1beta1.ConditionSeverityInfo, "Deletion of RosaRolesConfig started")
 		err = r.reconcileDelete(scope)
 		if err == nil {
 			controllerutil.RemoveFinalizer(scope.RosaRoleConfig, expinfrav1.RosaRoleConfigFinalizer)
@@ -136,22 +136,22 @@ func (r *ROSARoleConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 	}
 	if err := r.reconcileAccountRoles(scope); err != nil {
-		conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "Account Roles failure: %v", err)
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "Account Roles failure: %v", err)
 		return ctrl.Result{}, fmt.Errorf("account Roles: %w", err)
 	}
 	if err := r.reconcileOIDC(scope); err != nil {
-		conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "OIDC Config/provider failure: %v", err)
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "OIDC Config/provider failure: %v", err)
 		return ctrl.Result{}, fmt.Errorf("oicd Config: %w", err)
 	}
 	if err := r.reconcileOperatorRoles(scope); err != nil {
-		conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "Operator Roles failure: %v", err)
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "Operator Roles failure: %v", err)
 		return ctrl.Result{}, fmt.Errorf("operator Roles: %w", err)
 	}
 	if r.rosaRolesConfigReady(scope.RosaRoleConfig) {
-		conditions.Set(scope.RosaRoleConfig,
+		v1beta1conditions.Set(scope.RosaRoleConfig,
 			&clusterv1beta1.Condition{
 				Type: expinfrav1beta1.RosaRoleConfigReadyCondition,
 				Status: corev1.ConditionTrue,
@@ -160,7 +160,7 @@ func (r *ROSARoleConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 				Message: "RosaRoleConfig is ready",
 			})
 	} else {
-		conditions.Set(scope.RosaRoleConfig,
+		v1beta1conditions.Set(scope.RosaRoleConfig,
 			&clusterv1beta1.Condition{
 				Type: expinfrav1beta1.RosaRoleConfigReadyCondition,
 				Status: corev1.ConditionFalse,
@@ -175,17 +175,17 @@ func (r *ROSARoleConfigReconciler) Reconcile(ctx context.Context, req ctrl.Reque
 func (r *ROSARoleConfigReconciler) reconcileDelete(scope *scope.RosaRoleConfigScope) error {
 	if err := r.deleteOperatorRoles(scope); err != nil {
-		conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete operator roles: %v", err)
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete operator roles: %v", err)
 		return err
 	}
 	if err := r.deleteOIDC(scope); err != nil {
-		conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete OIDC provider: %v", err)
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete OIDC provider: %v", err)
 		return err
 	}
 	if err := r.deleteAccountRoles(scope); err != nil {
-		conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete account roles: %v", err)
+		v1beta1conditions.MarkFalse(scope.RosaRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition, expinfrav1beta1.RosaRoleConfigDeletionFailedReason, clusterv1beta1.ConditionSeverityError, "Failed to delete account roles: %v", err)
 		return err
 	}
diff --git a/exp/controllers/rosaroleconfig_controller_test.go b/exp/controllers/rosaroleconfig_controller_test.go
index a27feb334d..6e870e66af 100644
--- a/exp/controllers/rosaroleconfig_controller_test.go
+++ b/exp/controllers/rosaroleconfig_controller_test.go
@@ -47,7 +47,7 @@ import (
 	rosacontrolplanev1 "sigs.k8s.io/cluster-api-provider-aws/v2/controlplane/rosa/api/v1beta2"
 	expinfrav1beta1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta1"
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 // generateTestID creates a unique identifier for test resources.
@@ -627,7 +627,7 @@ func TestROSARoleConfigReconcileExist(t *testing.T) {
 	g.Expect(updatedRoleConfig.Status.OperatorRolesRef.KMSProviderARN).To(Equal("arn:aws:iam::123456789012:role/test-kube-system-kms-provider"))
 	// Should have a condition indicating success - expect Ready condition to be True
-	readyCondition := conditions.Get(updatedRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition)
+	readyCondition := v1beta1conditions.Get(updatedRoleConfig, expinfrav1beta1.RosaRoleConfigReadyCondition)
 	g.Expect(readyCondition).ToNot(BeNil())
 	g.Expect(readyCondition.Status).To(Equal(corev1.ConditionTrue))
 	g.Expect(readyCondition.Reason).To(Equal(expinfrav1beta1.RosaRoleConfigCreatedReason))
diff --git a/pkg/cloud/interfaces.go b/pkg/cloud/interfaces.go
index 1f4fb37091..b69800ea2f 100644
--- a/pkg/cloud/interfaces.go
+++ b/pkg/cloud/interfaces.go
@@ -28,7 +28,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 // Session represents an AWS session.
@@ -45,7 +45,7 @@ type ScopeUsage interface {
 // ClusterObject represents a AWS cluster object.
 type ClusterObject interface {
-	conditions.Setter
+	v1beta1conditions.Setter
 }
 // ClusterScoper is the interface for a cluster scope.
diff --git a/pkg/cloud/scope/cluster.go b/pkg/cloud/scope/cluster.go
index 8f3255a1cd..72d4663db4 100644
--- a/pkg/cloud/scope/cluster.go
+++ b/pkg/cloud/scope/cluster.go
@@ -35,8 +35,8 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 )
 // ClusterScopeParams defines the input parameters used to create a new Scope.
@@ -81,7 +81,7 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) {
 		return nil, errors.Errorf("failed to create aws V2 session: %v", err)
 	}
-	helper, err := patch.NewHelper(params.AWSCluster, params.Client)
+	helper, err := v1beta1patch.NewHelper(params.AWSCluster, params.Client)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to init patch helper")
 	}
@@ -97,7 +97,7 @@ func NewClusterScope(params ClusterScopeParams) (*ClusterScope, error) {
 type ClusterScope struct {
 	logger.Logger
 	client client.Client
-	patchHelper *patch.Helper
+	patchHelper *v1beta1patch.Helper
 	Cluster *clusterv1.Cluster
 	AWSCluster *infrav1.AWSCluster
@@ -275,16 +275,16 @@ func (s *ClusterScope) PatchObject() error {
 		}
 	}
-	conditions.SetSummary(s.AWSCluster,
-		conditions.WithConditions(applicableConditions...),
-		conditions.WithStepCounterIf(s.AWSCluster.ObjectMeta.DeletionTimestamp.IsZero()),
-		conditions.WithStepCounter(),
+	v1beta1conditions.SetSummary(s.AWSCluster,
+		v1beta1conditions.WithConditions(applicableConditions...),
+		v1beta1conditions.WithStepCounterIf(s.AWSCluster.ObjectMeta.DeletionTimestamp.IsZero()),
+		v1beta1conditions.WithStepCounter(),
 	)
 	return s.patchHelper.Patch(
 		context.TODO(),
 		s.AWSCluster,
-		patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
+		v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
 			clusterv1beta1.ReadyCondition,
 			infrav1beta1.VpcReadyCondition,
 			infrav1beta1.SubnetsReadyCondition,
diff --git a/pkg/cloud/scope/fargate.go b/pkg/cloud/scope/fargate.go
index 27f67557d3..88d8780c40 100644
--- a/pkg/cloud/scope/fargate.go
+++ b/pkg/cloud/scope/fargate.go
@@ -34,8 +34,8 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 )
 // FargateProfileScopeParams defines the input parameters used to create a new Scope.
@@ -75,7 +75,7 @@ func NewFargateProfileScope(params FargateProfileScopeParams) (*FargateProfileSc
 		return nil, errors.Errorf("failed to create aws v2 session: %v", err)
 	}
-	helper, err := patch.NewHelper(params.FargateProfile, params.Client)
+	helper, err := v1beta1patch.NewHelper(params.FargateProfile, params.Client)
 	if err != nil {
 		return nil, errors.Wrap(err, "failed to init patch helper")
 	}
@@ -98,7 +98,7 @@ func NewFargateProfileScope(params FargateProfileScopeParams) (*FargateProfileSc
 type FargateProfileScope struct {
 	logger.Logger
 	Client client.Client
-	patchHelper *patch.Helper
+	patchHelper *v1beta1patch.Helper
 	Cluster *clusterv1.Cluster
 	ControlPlane *ekscontrolplanev1.AWSManagedControlPlane
@@ -174,7 +174,7 @@ func (s *FargateProfileScope) IAMReadyFalse(reason string, err string) error {
 	if err == "" {
 		severity = clusterv1beta1.ConditionSeverityInfo
 	}
-	conditions.MarkFalse(
+	v1beta1conditions.MarkFalse(
 		s.FargateProfile,
 		expinfrav1beta1.IAMFargateRolesReadyCondition,
 		reason,
@@ -193,7 +193,7 @@ func (s *FargateProfileScope) PatchObject() error {
 	return s.patchHelper.Patch(
 		context.TODO(),
 		s.FargateProfile,
-		patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
+		v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
 			expinfrav1beta1.EKSFargateProfileReadyCondition,
 			expinfrav1beta1.EKSFargateCreatingCondition,
 			expinfrav1beta1.EKSFargateDeletingCondition,
diff --git a/pkg/cloud/scope/launchtemplate.go b/pkg/cloud/scope/launchtemplate.go
index 783597282e..f67a082783 100644
--- a/pkg/cloud/scope/launchtemplate.go
+++ b/pkg/cloud/scope/launchtemplate.go
@@ -25,7 +25,7 @@ import (
 	expinfrav1 "sigs.k8s.io/cluster-api-provider-aws/v2/exp/api/v1beta2"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 // LaunchTemplateScope defines a scope defined around a launch template.
@@ -43,7 +43,7 @@ type LaunchTemplateScope interface {
 	AdditionalTags() infrav1.Tags
 	GetObjectMeta() *metav1.ObjectMeta
-	GetSetter() conditions.Setter
+	GetSetter() v1beta1conditions.Setter
 	PatchObject() error
 	GetEC2Scope() EC2Scope
diff --git a/pkg/cloud/scope/machine.go b/pkg/cloud/scope/machine.go
index f905d6254d..1b6f76e28a 100644
--- a/pkg/cloud/scope/machine.go
+++ b/pkg/cloud/scope/machine.go
@@ -36,8 +36,8 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util"
 	"sigs.k8s.io/cluster-api/util/annotations"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 )
 // MachineScopeParams defines the input parameters used to create a new MachineScope.
@@ -74,7 +74,7 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { params.Logger = logger.NewLogger(log) } - helper, err := patch.NewHelper(params.AWSMachine, params.Client) + helper, err := v1beta1patch.NewHelper(params.AWSMachine, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -93,7 +93,7 @@ func NewMachineScope(params MachineScopeParams) (*MachineScope, error) { type MachineScope struct { logger.Logger client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster Machine *clusterv1.Machine @@ -313,16 +313,16 @@ func (m *MachineScope) PatchObject() error { applicableConditions = append(applicableConditions, infrav1beta1.ELBAttachedCondition) } - conditions.SetSummary(m.AWSMachine, - conditions.WithConditions(applicableConditions...), - conditions.WithStepCounterIf(m.AWSMachine.ObjectMeta.DeletionTimestamp.IsZero()), - conditions.WithStepCounter(), + v1beta1conditions.SetSummary(m.AWSMachine, + v1beta1conditions.WithConditions(applicableConditions...), + v1beta1conditions.WithStepCounterIf(m.AWSMachine.ObjectMeta.DeletionTimestamp.IsZero()), + v1beta1conditions.WithStepCounter(), ) return m.patchHelper.Patch( context.TODO(), m.AWSMachine, - patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ clusterv1.ReadyCondition, infrav1beta1.InstanceReadyCondition, infrav1beta1.SecurityGroupsReadyCondition, diff --git a/pkg/cloud/scope/machinepool.go b/pkg/cloud/scope/machinepool.go index a27524f10e..2674e7208b 100644 --- a/pkg/cloud/scope/machinepool.go +++ b/pkg/cloud/scope/machinepool.go @@ -39,16 +39,16 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/remote" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // MachinePoolScope defines a scope defined around a machine and its cluster. 
type MachinePoolScope struct { logger.Logger client.Client - patchHelper *patch.Helper - capiMachinePoolPatchHelper *patch.Helper + patchHelper *v1beta1patch.Helper + capiMachinePoolPatchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster MachinePool *clusterv1.MachinePool @@ -99,11 +99,11 @@ func NewMachinePoolScope(params MachinePoolScopeParams) (*MachinePoolScope, erro params.Logger = logger.NewLogger(log) } - ampHelper, err := patch.NewHelper(params.AWSMachinePool, params.Client) + ampHelper, err := v1beta1patch.NewHelper(params.AWSMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init AWSMachinePool patch helper") } - mpHelper, err := patch.NewHelper(params.MachinePool, params.Client) + mpHelper, err := v1beta1patch.NewHelper(params.MachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init MachinePool patch helper") } @@ -176,7 +176,7 @@ func (m *MachinePoolScope) PatchObject() error { return m.patchHelper.Patch( context.TODO(), m.AWSMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ expinfrav1beta1.ASGReadyCondition, expinfrav1beta1.LaunchTemplateReadyCondition, }}) @@ -239,7 +239,7 @@ func (m *MachinePoolScope) GetObjectMeta() *metav1.ObjectMeta { } // GetSetter returns the AWSMachinePool object setter. -func (m *MachinePoolScope) GetSetter() conditions.Setter { +func (m *MachinePoolScope) GetSetter() v1beta1conditions.Setter { return m.AWSMachinePool } diff --git a/pkg/cloud/scope/managedcontrolplane.go b/pkg/cloud/scope/managedcontrolplane.go index af268cf08b..2030da0144 100644 --- a/pkg/cloud/scope/managedcontrolplane.go +++ b/pkg/cloud/scope/managedcontrolplane.go @@ -43,7 +43,7 @@ import ( clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" "sigs.k8s.io/cluster-api/controllers/remote" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) var scheme = runtime.NewScheme() @@ -105,7 +105,7 @@ func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*Manage managedScope.session = *session managedScope.serviceLimiters = serviceLimiters - helper, err := patch.NewHelper(params.ControlPlane, params.Client) + helper, err := v1beta1patch.NewHelper(params.ControlPlane, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -118,7 +118,7 @@ func NewManagedControlPlaneScope(params ManagedControlPlaneScopeParams) (*Manage type ManagedControlPlaneScope struct { logger.Logger Client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster ControlPlane *ekscontrolplanev1.AWSManagedControlPlane @@ -269,7 +269,7 @@ func (s *ManagedControlPlaneScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ControlPlane, - patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ infrav1beta1.VpcReadyCondition, infrav1beta1.SubnetsReadyCondition, infrav1beta1.ClusterSecurityGroupsReadyCondition, diff --git a/pkg/cloud/scope/managednodegroup.go b/pkg/cloud/scope/managednodegroup.go index 946d5496a1..fcf8ec2754 100644 --- a/pkg/cloud/scope/managednodegroup.go +++ b/pkg/cloud/scope/managednodegroup.go @@ -40,8 +40,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" 
clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ManagedMachinePoolScopeParams defines the input parameters used to create a new Scope. @@ -93,11 +93,11 @@ func NewManagedMachinePoolScope(params ManagedMachinePoolScopeParams) (*ManagedM return nil, errors.Errorf("failed to create aws V2 session: %v", err) } - ammpHelper, err := patch.NewHelper(params.ManagedMachinePool, params.Client) + ammpHelper, err := v1beta1patch.NewHelper(params.ManagedMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init AWSManagedMachinePool patch helper") } - mpHelper, err := patch.NewHelper(params.MachinePool, params.Client) + mpHelper, err := v1beta1patch.NewHelper(params.MachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init MachinePool patch helper") } @@ -126,8 +126,8 @@ func NewManagedMachinePoolScope(params ManagedMachinePoolScopeParams) (*ManagedM type ManagedMachinePoolScope struct { logger.Logger client.Client - patchHelper *patch.Helper - capiMachinePoolPatchHelper *patch.Helper + patchHelper *v1beta1patch.Helper + capiMachinePoolPatchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster ControlPlane *ekscontrolplanev1.AWSManagedControlPlane @@ -236,7 +236,7 @@ func (s *ManagedMachinePoolScope) NodegroupReadyFalse(reason string, err string) if err == "" { severity = clusterv1beta1.ConditionSeverityInfo } - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.ManagedMachinePool, expinfrav1beta1.EKSNodegroupReadyCondition, reason, @@ -257,7 +257,7 @@ func (s *ManagedMachinePoolScope) IAMReadyFalse(reason string, err string) error if err == "" { severity = clusterv1beta1.ConditionSeverityInfo } - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.ManagedMachinePool, expinfrav1beta1.IAMNodegroupRolesReadyCondition, reason, @@ -276,7 +276,7 @@ func (s *ManagedMachinePoolScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ManagedMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ expinfrav1beta1.EKSNodegroupReadyCondition, expinfrav1beta1.IAMNodegroupRolesReadyCondition, }}) @@ -368,7 +368,7 @@ func (s *ManagedMachinePoolScope) GetObjectMeta() *metav1.ObjectMeta { } // GetSetter returns the condition setter. -func (s *ManagedMachinePoolScope) GetSetter() conditions.Setter { +func (s *ManagedMachinePoolScope) GetSetter() v1beta1conditions.Setter { return s.ManagedMachinePool } diff --git a/pkg/cloud/scope/rosacontrolplane.go b/pkg/cloud/scope/rosacontrolplane.go index 21b78080e9..d870fd3672 100644 --- a/pkg/cloud/scope/rosacontrolplane.go +++ b/pkg/cloud/scope/rosacontrolplane.go @@ -38,7 +38,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ROSAControlPlaneScopeParams defines the input parameters used to create a new ROSAControlPlaneScope. 
@@ -78,7 +78,7 @@ func NewROSAControlPlaneScope(params ROSAControlPlaneScopeParams) (*ROSAControlP return nil, errors.Errorf("failed to create aws V2 session: %v", err) } - helper, err := patch.NewHelper(params.ControlPlane, params.Client) + helper, err := v1beta1patch.NewHelper(params.ControlPlane, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -101,7 +101,7 @@ func NewROSAControlPlaneScope(params ROSAControlPlaneScopeParams) (*ROSAControlP type ROSAControlPlaneScope struct { logger.Logger Client client.Client - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster ControlPlane *rosacontrolplanev1.ROSAControlPlane @@ -216,7 +216,7 @@ func (s *ROSAControlPlaneScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ControlPlane, - patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ rosacontrolplanev1beta1.ROSAControlPlaneReadyCondition, rosacontrolplanev1beta1.ROSAControlPlaneValidCondition, rosacontrolplanev1beta1.ROSAControlPlaneUpgradingCondition, diff --git a/pkg/cloud/scope/rosamachinepool.go b/pkg/cloud/scope/rosamachinepool.go index 2f7ad536a4..ab05048f39 100644 --- a/pkg/cloud/scope/rosamachinepool.go +++ b/pkg/cloud/scope/rosamachinepool.go @@ -33,8 +33,8 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // RosaMachinePoolScopeParams defines the input parameters used to create a new Scope. @@ -65,11 +65,11 @@ func NewRosaMachinePoolScope(params RosaMachinePoolScopeParams) (*RosaMachinePoo params.Logger = logger.NewLogger(log) } - ammpHelper, err := patch.NewHelper(params.RosaMachinePool, params.Client) + ammpHelper, err := v1beta1patch.NewHelper(params.RosaMachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init RosaMachinePool patch helper") } - mpHelper, err := patch.NewHelper(params.MachinePool, params.Client) + mpHelper, err := v1beta1patch.NewHelper(params.MachinePool, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init MachinePool patch helper") } @@ -104,8 +104,8 @@ var _ cloud.SessionMetadata = &RosaMachinePoolScope{} type RosaMachinePoolScope struct { logger.Logger client.Client - patchHelper *patch.Helper - capiMachinePoolPatchHelper *patch.Helper + patchHelper *v1beta1patch.Helper + capiMachinePoolPatchHelper *v1beta1patch.Helper Cluster *clusterv1.Cluster ControlPlane *rosacontrolplanev1.ROSAControlPlane @@ -155,7 +155,7 @@ func (s *RosaMachinePoolScope) ControllerName() string { } // GetSetter returns the condition setter for the RosaMachinePool. 
-func (s *RosaMachinePoolScope) GetSetter() conditions.Setter { +func (s *RosaMachinePoolScope) GetSetter() v1beta1conditions.Setter { return s.RosaMachinePool } @@ -194,7 +194,7 @@ func (s *RosaMachinePoolScope) RosaMachinePoolReadyFalse(reason string, err stri if err == "" { severity = clusterv1beta1.ConditionSeverityInfo } - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.RosaMachinePool, expinfrav1beta1.RosaMachinePoolReadyCondition, reason, @@ -213,7 +213,7 @@ func (s *RosaMachinePoolScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.RosaMachinePool, - patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ expinfrav1beta1.RosaMachinePoolReadyCondition, }}) } diff --git a/pkg/cloud/scope/rosanetwork.go b/pkg/cloud/scope/rosanetwork.go index 7b8727de99..2f9f670823 100644 --- a/pkg/cloud/scope/rosanetwork.go +++ b/pkg/cloud/scope/rosanetwork.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // ROSANetworkScopeParams defines the input parameters used to create a new ROSANetworkScope. @@ -47,7 +47,7 @@ type ROSANetworkScope struct { logger.Logger Client client.Client controllerName string - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper ROSANetwork *expinfrav1.ROSANetwork serviceLimiters throttle.ServiceLimiters session awsv2.Config @@ -73,7 +73,7 @@ func NewROSANetworkScope(params ROSANetworkScopeParams) (*ROSANetworkScope, erro return nil, errors.Errorf("failed to create aws V2 session: %v", err) } - patchHelper, err := patch.NewHelper(params.ROSANetwork, params.Client) + patchHelper, err := v1beta1patch.NewHelper(params.ROSANetwork, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } @@ -131,7 +131,7 @@ func (s *ROSANetworkScope) PatchObject() error { return s.patchHelper.Patch( context.TODO(), s.ROSANetwork, - patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ + v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{ expinfrav1beta1.ROSANetworkReadyCondition, }}) } diff --git a/pkg/cloud/scope/rosaroleconfig.go b/pkg/cloud/scope/rosaroleconfig.go index 91c1eaea2c..1ee4c0d2c3 100644 --- a/pkg/cloud/scope/rosaroleconfig.go +++ b/pkg/cloud/scope/rosaroleconfig.go @@ -32,7 +32,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/throttle" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/logger" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) // RosaRoleConfigScopeParams defines the input parameters used to create a new RosaRoleConfigScope. 
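The *ReadyFalse helpers touched above (IAMReadyFalse, NodegroupReadyFalse, RosaMachinePoolReadyFalse) share one severity convention, sketched here with placeholder condition and reason values:

	// markNotReady records a False condition, downgrading severity to Info
	// when there is no error message — mirroring the helpers in these hunks.
	func markNotReady(obj v1beta1conditions.Setter, cond clusterv1beta1.ConditionType, reason, msg string) {
		severity := clusterv1beta1.ConditionSeverityError
		if msg == "" {
			severity = clusterv1beta1.ConditionSeverityInfo
		}
		v1beta1conditions.MarkFalse(obj, cond, reason, severity, "%s", msg)
	}
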
@@ -48,7 +48,7 @@ type RosaRoleConfigScope struct { logger.Logger Client client.Client controllerName string - patchHelper *patch.Helper + patchHelper *v1beta1patch.Helper RosaRoleConfig *expinfrav1.ROSARoleConfig serviceLimiters throttle.ServiceLimiters session aws.Config @@ -71,14 +71,13 @@ func NewRosaRoleConfigScope(params RosaRoleConfigScopeParams) (*RosaRoleConfigSc } session, serviceLimiters, err := sessionForClusterWithRegion(params.Client, RosaRoleConfigScope, "", params.Logger) - if err != nil { return nil, errors.Errorf("failed to create aws V2 session: %v", err) } iamClient := iam.NewFromConfig(*session) - patchHelper, err := patch.NewHelper(params.RosaRoleConfig, params.Client) + patchHelper, err := v1beta1patch.NewHelper(params.RosaRoleConfig, params.Client) if err != nil { return nil, errors.Wrap(err, "failed to init patch helper") } diff --git a/pkg/cloud/scope/session.go b/pkg/cloud/scope/session.go index a4bac26e77..bd053b4d4f 100644 --- a/pkg/cloud/scope/session.go +++ b/pkg/cloud/scope/session.go @@ -43,16 +43,18 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/util/system" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" "sigs.k8s.io/cluster-api/util" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch" ) const ( notPermittedError = "Namespace is not permitted to use %s: %s" ) -var sessionCache sync.Map -var providerCache sync.Map +var ( + sessionCache sync.Map + providerCache sync.Map +) type sessionCacheEntry struct { session *aws.Config @@ -72,7 +74,6 @@ func sessionForRegion(region string) (*aws.Config, throttle.ServiceLimiters, err } ns, err := config.LoadDefaultConfig(context.Background(), config.WithRegion(region)) - if err != nil { return nil, nil, err } @@ -92,7 +93,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se providers, err := getProvidersForCluster(context.Background(), k8sClient, clusterScoper, region, log) if err != nil { // could not get providers and retrieve the credentials - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1beta1.PrincipalCredentialRetrievedCondition, infrav1beta1.PrincipalCredentialRetrievalFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1beta1.PrincipalCredentialRetrievedCondition, infrav1beta1.PrincipalCredentialRetrievalFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return nil, nil, errors.Wrap(err, "Failed to get providers for cluster") } @@ -130,7 +131,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se // Check if identity credentials can be retrieved. One reason this will fail is that source identity is not authorized for assume role. _, err := providers[0].Retrieve(context.Background()) if err != nil { - conditions.MarkUnknown(clusterScoper.InfraCluster(), infrav1beta1.PrincipalCredentialRetrievedCondition, infrav1beta1.CredentialProviderBuildFailedReason, "%s", err.Error()) + v1beta1conditions.MarkUnknown(clusterScoper.InfraCluster(), infrav1beta1.PrincipalCredentialRetrievedCondition, infrav1beta1.CredentialProviderBuildFailedReason, "%s", err.Error()) // delete the existing session from cache. 
Otherwise, we give back a defective session on next method invocation with same cluster scope sessionCache.Delete(getSessionName(region, clusterScoper)) @@ -141,7 +142,7 @@ func sessionForClusterWithRegion(k8sClient client.Client, clusterScoper cloud.Se optFns = append(optFns, config.WithCredentialsProvider(chainProvider)) } - conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1beta1.PrincipalCredentialRetrievedCondition) + v1beta1conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1beta1.PrincipalCredentialRetrievedCondition) ns, err := config.LoadDefaultConfig(context.Background(), optFns...) if err != nil { @@ -227,7 +228,8 @@ func buildProvidersForRef( clusterScoper cloud.SessionMetadata, ref *infrav1.AWSIdentityReference, region string, - log logger.Wrapper) ([]identity.AWSPrincipalTypeProvider, error) { + log logger.Wrapper, +) ([]identity.AWSPrincipalTypeProvider, error) { if ref == nil { log.Trace("AWSCluster does not have a IdentityRef specified") return providers, nil @@ -289,21 +291,21 @@ func buildProvidersForRef( default: return providers, errors.Errorf("No such provider known: '%s'", ref.Kind) } - conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition) + v1beta1conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition) return providers, nil } func setPrincipalUsageAllowedCondition(clusterScoper cloud.SessionMetadata) { - conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition) + v1beta1conditions.MarkTrue(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition) } func setPrincipalUsageNotAllowedCondition(kind infrav1.AWSIdentityKind, identityObjectKey client.ObjectKey, clusterScoper cloud.SessionMetadata) { errMsg := fmt.Sprintf(notPermittedError, kind, identityObjectKey.Name) if clusterScoper.IdentityRef().Name == identityObjectKey.Name { - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition, infrav1beta1.PrincipalUsageUnauthorizedReason, clusterv1beta1.ConditionSeverityError, "%s", errMsg) + v1beta1conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition, infrav1beta1.PrincipalUsageUnauthorizedReason, clusterv1beta1.ConditionSeverityError, "%s", errMsg) } else { - conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition, infrav1beta1.SourcePrincipalUsageUnauthorizedReason, clusterv1beta1.ConditionSeverityError, "%s", errMsg) + v1beta1conditions.MarkFalse(clusterScoper.InfraCluster(), infrav1beta1.PrincipalUsageAllowedCondition, infrav1beta1.SourcePrincipalUsageUnauthorizedReason, clusterv1beta1.ConditionSeverityError, "%s", errMsg) } } @@ -320,7 +322,7 @@ func buildAWSClusterStaticIdentity(ctx context.Context, identityObjectKey client } // Set ClusterStaticPrincipal as Secret's owner reference for 'clusterctl move'. 
- patchHelper, err := patch.NewHelper(secret, k8sClient) + patchHelper, err := v1beta1patch.NewHelper(secret, k8sClient) if err != nil { return nil, errors.Wrapf(err, "failed to init patch helper for secret name:%s namespace:%s", secret.Name, secret.Namespace) } diff --git a/pkg/cloud/services/ec2/bastion.go b/pkg/cloud/services/ec2/bastion.go index 441f5a4721..40601df50a 100644 --- a/pkg/cloud/services/ec2/bastion.go +++ b/pkg/cloud/services/ec2/bastion.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/userdata" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( @@ -73,8 +73,8 @@ func (s *Service) ReconcileBastion() error { // Describe bastion instance, if any. instance, err := s.describeBastionInstance() if awserrors.IsNotFound(err) { //nolint:nestif - if !conditions.Has(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition) { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, infrav1beta1.BastionCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "") + if !v1beta1conditions.Has(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition) { + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, infrav1beta1.BastionCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return errors.Wrap(err, "failed to patch conditions") } @@ -99,7 +99,7 @@ func (s *Service) ReconcileBastion() error { // TODO(vincepri): check for possible changes between the default spec and the instance. 
s.scope.SetBastionInstance(instance.DeepCopy()) - conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition) + v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition) s.scope.Debug("Reconcile bastion completed successfully") return nil @@ -116,20 +116,20 @@ func (s *Service) DeleteBastion() error { return errors.Wrap(err, "unable to describe bastion instance") } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } if err := s.TerminateInstanceAndWait(instance.ID); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) record.Warnf(s.scope.InfraCluster(), "FailedTerminateBastion", "Failed to terminate bastion instance %q: %v", instance.ID, err) return errors.Wrap(err, "unable to delete bastion instance") } s.scope.SetBastionInstance(nil) - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.BastionHostReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") record.Eventf(s.scope.InfraCluster(), "SuccessfulTerminateBastion", "Terminated bastion instance %q", instance.ID) s.scope.Info("Deleted bastion host", "id", instance.ID) diff --git a/pkg/cloud/services/ec2/launchtemplate.go b/pkg/cloud/services/ec2/launchtemplate.go index 1ba96aab34..f6750864f2 100644 --- a/pkg/cloud/services/ec2/launchtemplate.go +++ b/pkg/cloud/services/ec2/launchtemplate.go @@ -50,7 +50,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) const ( @@ -86,13 +86,13 @@ func (s *Service) ReconcileLaunchTemplate( scope.Info("checking for existing launch template") launchTemplate, launchTemplateUserDataHash, launchTemplateUserDataSecretKey, _, err := ec2svc.GetLaunchTemplate(scope.LaunchTemplateName()) if err != nil { - conditions.MarkUnknown(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateNotFoundReason, "%s", err.Error()) + v1beta1conditions.MarkUnknown(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateNotFoundReason, "%s", err.Error()) return nil, err } imageID, err := ec2svc.DiscoverLaunchTemplateAMI(ctx, scope) if err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateCreateFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateCreateFailedReason, 
clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return nil, err } @@ -122,14 +122,14 @@ func (s *Service) ReconcileLaunchTemplate( // `AWSMachinePool.Spec.Ignition != nil` to toggle the S3 feature on for `AWSMachinePool` objects. objectURL, err := objectStoreSvc.CreateForMachinePool(ctx, scope, bootstrapData) if err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return nil, err } semver, err := semver.ParseTolerant(ignitionVersion) if err != nil { err = errors.Wrapf(err, "failed to parse ignition version %q", ignitionVersion) - conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return nil, err } @@ -152,7 +152,7 @@ func (s *Service) ReconcileLaunchTemplate( userDataForLaunchTemplate, err = json.Marshal(ignData) if err != nil { err = errors.Wrap(err, "failed to convert ignition config to JSON") - conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return nil, err } case 3: @@ -172,12 +172,12 @@ func (s *Service) ReconcileLaunchTemplate( userDataForLaunchTemplate, err = json.Marshal(ignData) if err != nil { err = errors.Wrap(err, "failed to convert ignition config to JSON") - conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return nil, err } default: err = errors.Errorf("unsupported ignition version %q", ignitionVersion) - conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateReconcileFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return nil, err } } else { @@ -192,7 +192,7 @@ func (s *Service) ReconcileLaunchTemplate( scope.Info("no existing launch template found, creating") launchTemplateID, err := ec2svc.CreateLaunchTemplate(scope, imageID, *bootstrapDataSecretKey, userDataForLaunchTemplate, userdata.ComputeHash(bootstrapData)) if err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateCreateFailedReason, 
clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateCreateFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return nil, err } @@ -205,7 +205,7 @@ func (s *Service) ReconcileLaunchTemplate( if scope.GetLaunchTemplateIDStatus() == "" { launchTemplateID, err := ec2svc.GetLaunchTemplateID(scope.LaunchTemplateName()) if err != nil { - conditions.MarkUnknown(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateNotFoundReason, "%s", err.Error()) + v1beta1conditions.MarkUnknown(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateNotFoundReason, "%s", err.Error()) return nil, err } scope.SetLaunchTemplateIDStatus(launchTemplateID) @@ -217,7 +217,7 @@ func (s *Service) ReconcileLaunchTemplate( if scope.GetLaunchTemplateLatestVersionStatus() == "" { launchTemplateVersion, err := ec2svc.GetLaunchTemplateLatestVersion(scope.GetLaunchTemplateIDStatus()) if err != nil { - conditions.MarkUnknown(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateNotFoundReason, "%s", err.Error()) + v1beta1conditions.MarkUnknown(scope.GetSetter(), expinfrav1beta1.LaunchTemplateReadyCondition, expinfrav1beta1.LaunchTemplateNotFoundReason, "%s", err.Error()) return nil, err } scope.SetLaunchTemplateLatestVersionStatus(launchTemplateVersion) @@ -326,10 +326,10 @@ func (s *Service) ReconcileLaunchTemplate( if needsUpdate || tagsChanged || amiChanged || userDataSecretKeyChanged { if err := runPostLaunchTemplateUpdateOperation(); err != nil { - conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.PostLaunchTemplateUpdateOperationCondition, expinfrav1beta1.PostLaunchTemplateUpdateOperationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(scope.GetSetter(), expinfrav1beta1.PostLaunchTemplateUpdateOperationCondition, expinfrav1beta1.PostLaunchTemplateUpdateOperationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return nil, err } - conditions.MarkTrue(scope.GetSetter(), expinfrav1beta1.PostLaunchTemplateUpdateOperationCondition) + v1beta1conditions.MarkTrue(scope.GetSetter(), expinfrav1beta1.PostLaunchTemplateUpdateOperationCondition) } return nil, nil diff --git a/pkg/cloud/services/eks/cluster.go b/pkg/cloud/services/eks/cluster.go index 5980fbe485..8b75a49401 100644 --- a/pkg/cloud/services/eks/cluster.go +++ b/pkg/cloud/services/eks/cluster.go @@ -45,7 +45,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func (s *Service) reconcileCluster(ctx context.Context) error { @@ -209,12 +209,12 @@ func (s *Service) setStatus(cluster *ekstypes.Cluster) error { case ekstypes.ClusterStatusActive: s.scope.ControlPlane.Status.Ready = true s.scope.ControlPlane.Status.FailureMessage = nil - if conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneCreatingCondition) { + if v1beta1conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneCreatingCondition) { record.Eventf(s.scope.ControlPlane, "SuccessfulCreateEKSControlPlane", "Created new EKS control plane %s", 
s.scope.KubernetesClusterName()) - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneCreatingCondition, "created", clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneCreatingCondition, "created", clusterv1beta1.ConditionSeverityInfo, "") } - if conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition, "updated", clusterv1beta1.ConditionSeverityInfo, "") + if v1beta1conditions.IsTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) { + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition, "updated", clusterv1beta1.ConditionSeverityInfo, "") record.Eventf(s.scope.ControlPlane, "SuccessfulUpdateEKSControlPlane", "Updated EKS control plane %s", s.scope.KubernetesClusterName()) } if s.scope.ControlPlane.Spec.UpgradePolicy == ekscontrolplanev1.UpgradePolicyStandard && @@ -525,7 +525,7 @@ func (s *Service) createCluster(ctx context.Context, eksClusterName string) (*ek if out, err = s.EKSClient.CreateCluster(ctx, input); err != nil { return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneCreatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneCreatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedCreateEKSControlPlane", "Initiated creation of a new EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil }, awserrors.ResourceNotFound); err != nil { // TODO: change the error that can be retried @@ -583,7 +583,7 @@ func (s *Service) reconcileClusterConfig(ctx context.Context, cluster *ekstypes. 
if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil { return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated update of a new EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil }); err != nil { @@ -626,7 +626,7 @@ func (s *Service) reconcileAccessConfig(ctx context.Context, accessConfig *eksty return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated auth config update for EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil }); err != nil { @@ -655,7 +655,7 @@ func (s *Service) reconcileLogging(ctx context.Context, logging *ekstypes.Loggin if _, err := s.EKSClient.UpdateClusterConfig(ctx, input); err != nil { return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated logging update for EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil }); err != nil { @@ -803,7 +803,7 @@ func (s *Service) reconcileClusterVersion(ctx context.Context, cluster *ekstypes return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEKSControlPlane", "Initiated update of EKS control plane %s to version %s", s.scope.KubernetesClusterName(), nextVersionString) return true, nil @@ -874,7 +874,7 @@ func (s *Service) updateEncryptionConfig(ctx context.Context, updatedEncryptionC return false, err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneUpdatingCondition) record.Eventf(s.scope.ControlPlane, "InitiatedUpdateEncryptionConfig", "Initiated update of encryption config in EKS control plane %s", s.scope.KubernetesClusterName()) return true, nil diff --git a/pkg/cloud/services/eks/eks.go b/pkg/cloud/services/eks/eks.go index aed1575ac7..6c492b5ecd 100644 --- a/pkg/cloud/services/eks/eks.go +++ b/pkg/cloud/services/eks/eks.go @@ -28,7 +28,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // ReconcileControlPlane reconciles a EKS control plane. 
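Every step in ReconcileControlPlane below is bracketed the same way: mark the step's condition False on error (or Unknown for lookup failures, as in launchtemplate.go above), True on success. A hedged sketch of that bracketing, with step as a hypothetical stand-in for the real reconcile calls:

	// reconcileStep runs one reconcile phase and records its outcome on obj.
	func reconcileStep(obj v1beta1conditions.Setter, cond clusterv1beta1.ConditionType,
		failReason string, step func() error,
	) error {
		if err := step(); err != nil {
			v1beta1conditions.MarkFalse(obj, cond, failReason,
				clusterv1beta1.ConditionSeverityError, "%s", err.Error())
			return err
		}
		v1beta1conditions.MarkTrue(obj, cond)
		return nil
	}
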
@@ -37,31 +37,31 @@ func (s *Service) ReconcileControlPlane(ctx context.Context) error { // Control Plane IAM Role if err := s.reconcileControlPlaneIAMRole(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.IAMControlPlaneRolesReadyCondition, ekscontrolplanev1beta1.IAMControlPlaneRolesReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.IAMControlPlaneRolesReadyCondition, ekscontrolplanev1beta1.IAMControlPlaneRolesReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.IAMControlPlaneRolesReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.IAMControlPlaneRolesReadyCondition) // EKS Cluster if err := s.reconcileCluster(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneReadyCondition, ekscontrolplanev1beta1.EKSControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneReadyCondition, ekscontrolplanev1beta1.EKSControlPlaneReconciliationFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return err } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSControlPlaneReadyCondition) // EKS Addons if err := s.reconcileAddons(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSAddonsConfiguredCondition, ekscontrolplanev1beta1.EKSAddonsConfiguredFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSAddonsConfiguredCondition, ekscontrolplanev1beta1.EKSAddonsConfiguredFailedReason, clusterv1beta1.ConditionSeverityError, "%s", err.Error()) return errors.Wrap(err, "failed reconciling eks addons") } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSAddonsConfiguredCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSAddonsConfiguredCondition) // EKS Identity Provider if err := s.reconcileIdentityProvider(ctx); err != nil { - conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSIdentityProviderConfiguredCondition, ekscontrolplanev1beta1.EKSIdentityProviderConfiguredFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSIdentityProviderConfiguredCondition, ekscontrolplanev1beta1.EKSIdentityProviderConfiguredFailedReason, clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return errors.Wrap(err, "failed reconciling eks identity provider") } - conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSIdentityProviderConfiguredCondition) + v1beta1conditions.MarkTrue(s.scope.ControlPlane, ekscontrolplanev1beta1.EKSIdentityProviderConfiguredCondition) s.scope.Debug("Reconcile EKS control plane completed successfully") return nil @@ -95,7 +95,7 @@ func (s *NodegroupService) ReconcilePool(ctx context.Context) error { s.scope.Debug("Reconciling EKS nodegroup") if err := s.reconcileNodegroupIAMRole(ctx); err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( 
s.scope.ManagedMachinePool, expinfrav1beta1.IAMNodegroupRolesReadyCondition, expinfrav1beta1.IAMNodegroupRolesReconciliationFailedReason, @@ -105,10 +105,10 @@ func (s *NodegroupService) ReconcilePool(ctx context.Context) error { ) return err } - conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1beta1.IAMNodegroupRolesReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1beta1.IAMNodegroupRolesReadyCondition) if err := s.reconcileNodegroup(ctx); err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.ManagedMachinePool, expinfrav1beta1.EKSNodegroupReadyCondition, expinfrav1beta1.EKSNodegroupReconciliationFailedReason, @@ -118,7 +118,7 @@ func (s *NodegroupService) ReconcilePool(ctx context.Context) error { ) return err } - conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1beta1.EKSNodegroupReadyCondition) + v1beta1conditions.MarkTrue(s.scope.ManagedMachinePool, expinfrav1beta1.EKSNodegroupReadyCondition) return nil } diff --git a/pkg/cloud/services/eks/fargate.go b/pkg/cloud/services/eks/fargate.go index 5da499d6c5..d7c32e4a60 100644 --- a/pkg/cloud/services/eks/fargate.go +++ b/pkg/cloud/services/eks/fargate.go @@ -34,7 +34,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) func requeueProfileUpdating() reconcile.Result { @@ -51,7 +51,7 @@ func (s *FargateService) Reconcile(ctx context.Context) (reconcile.Result, error requeue, err := s.reconcileFargateIAMRole(ctx) if err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.FargateProfile, expinfrav1beta1.IAMFargateRolesReadyCondition, expinfrav1beta1.IAMFargateRolesReconciliationFailedReason, @@ -67,11 +67,11 @@ func (s *FargateService) Reconcile(ctx context.Context) (reconcile.Result, error return requeueRoleUpdating(), nil } - conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.IAMFargateRolesReadyCondition) + v1beta1conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.IAMFargateRolesReadyCondition) requeue, err = s.reconcileFargateProfile(ctx) if err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.FargateProfile, clusterv1.ReadyCondition, expinfrav1beta1.EKSFargateReconciliationFailedReason, @@ -125,34 +125,34 @@ func (s *FargateService) handleStatus(profile *ekstypes.FargateProfile) (requeue switch profile.Status { case ekstypes.FargateProfileStatusCreating: s.scope.FargateProfile.Status.Ready = false - if conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) { - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition, expinfrav1beta1.EKSFargateCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") + if v1beta1conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) { + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition, expinfrav1beta1.EKSFargateCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") } - if !conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition) { + if !v1beta1conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition) { record.Eventf(s.scope.FargateProfile, "InitiatedCreateEKSFargateProfile", "Started creating EKS fargate 
profile %s", s.scope.FargateProfile.Spec.ProfileName) - conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition) + v1beta1conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition) } - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateCreatingReason, clusterv1beta1.ConditionSeverityInfo, "") case ekstypes.FargateProfileStatusCreateFailed, ekstypes.FargateProfileStatusDeleteFailed: s.scope.FargateProfile.Status.Ready = false s.scope.FargateProfile.Status.FailureMessage = aws.String(fmt.Sprintf("unexpected profile status: %s", string(profile.Status))) reason := expinfrav1beta1.EKSFargateFailedReason s.scope.FargateProfile.Status.FailureReason = &reason - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateFailedReason, clusterv1beta1.ConditionSeverityError, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateFailedReason, clusterv1beta1.ConditionSeverityError, "") case ekstypes.FargateProfileStatusActive: s.scope.FargateProfile.Status.Ready = true - if conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition) { + if v1beta1conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition) { record.Eventf(s.scope.FargateProfile, "SuccessfulCreateEKSFargateProfile", "Created new EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName) - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition, expinfrav1beta1.EKSFargateCreatedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateCreatingCondition, expinfrav1beta1.EKSFargateCreatedReason, clusterv1beta1.ConditionSeverityInfo, "") } - conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition) + v1beta1conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition) case ekstypes.FargateProfileStatusDeleting: s.scope.FargateProfile.Status.Ready = false - if !conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) { + if !v1beta1conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) { record.Eventf(s.scope.FargateProfile, "InitiatedDeleteEKSFargateProfile", "Started deleting EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName) - conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) + v1beta1conditions.MarkTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) } - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateDeletingReason, clusterv1beta1.ConditionSeverityInfo, "") } switch profile.Status { case ekstypes.FargateProfileStatusCreating, ekstypes.FargateProfileStatusDeleting: @@ -168,7 +168,7 @@ func (s *FargateService) ReconcileDelete(ctx context.Context) (reconcile.Result, requeue, 
err := s.deleteFargateProfile(ctx) if err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.FargateProfile, clusterv1.ReadyCondition, expinfrav1beta1.EKSFargateReconciliationFailedReason, @@ -185,7 +185,7 @@ func (s *FargateService) ReconcileDelete(ctx context.Context) (reconcile.Result, err = s.deleteFargateIAMRole(ctx) if err != nil { - conditions.MarkFalse( + v1beta1conditions.MarkFalse( s.scope.FargateProfile, expinfrav1beta1.IAMFargateRolesReadyCondition, expinfrav1beta1.IAMFargateRolesReconciliationFailedReason, @@ -273,11 +273,11 @@ func (s *FargateService) deleteFargateProfile(ctx context.Context) (requeue bool return false, errors.Wrap(err, "failed to describe profile") } if profile == nil { - if conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) { + if v1beta1conditions.IsTrue(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition) { record.Eventf(s.scope.FargateProfile, "SuccessfulDeleteEKSFargateProfile", "Deleted EKS fargate profile %s", s.scope.FargateProfile.Spec.ProfileName) - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition, expinfrav1beta1.EKSFargateDeletedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateDeletingCondition, expinfrav1beta1.EKSFargateDeletedReason, clusterv1beta1.ConditionSeverityInfo, "") } - conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateDeletedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.FargateProfile, expinfrav1beta1.EKSFargateProfileReadyCondition, expinfrav1beta1.EKSFargateDeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return false, nil } diff --git a/pkg/cloud/services/elb/loadbalancer.go b/pkg/cloud/services/elb/loadbalancer.go index 258b673c71..995f599259 100644 --- a/pkg/cloud/services/elb/loadbalancer.go +++ b/pkg/cloud/services/elb/loadbalancer.go @@ -48,7 +48,7 @@ import ( "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/hash" "sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record" clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1" - "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" + v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions" ) // ResourceGroups are filtered by ARN identifier: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#arns-syntax @@ -683,7 +683,7 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { return errors.Wrap(err, "failed to get control plane load balancer name") } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } @@ -691,7 +691,7 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { apiELB, err := s.describeClassicELB(ctx, elbName) if IsNotFound(err) { s.scope.Debug("Control plane load balancer not found, skipping deletion") - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, 
clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } if err != nil { @@ -700,13 +700,13 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { if apiELB.IsUnmanaged(s.scope.Name()) { s.scope.Debug("Found unmanaged classic load balancer for apiserver, skipping deletion", "api-server-elb-name", apiELB.Name) - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") return nil } s.scope.Debug("deleting load balancer", "name", elbName) if err := s.deleteClassicELB(ctx, elbName); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -718,7 +718,7 @@ func (s *Service) deleteAPIServerELB(ctx context.Context) error { return errors.Wrapf(err, "failed to wait for %q load balancer deletion", s.scope.Name()) } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") s.scope.Info("Deleted control plane load balancer", "name", elbName) return nil } @@ -793,7 +793,7 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad if err != nil { return errors.Wrap(err, "failed to get control plane load balancer name") } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "") if err := s.scope.PatchObject(); err != nil { return err } @@ -812,7 +812,7 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad } s.scope.Debug("deleting load balancer", "name", name) if err := s.deleteLB(ctx, lb.ARN); err != nil { - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error()) return err } @@ -824,7 +824,7 @@ func (s *Service) deleteExistingNLB(ctx context.Context, lbSpec *infrav1.AWSLoad return errors.Wrapf(err, "failed to wait for %q load balancer deletion", s.scope.Name()) } - conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") + v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.LoadBalancerReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "") s.scope.Info("Deleted control plane load balancer", "name", name) return nil @@ -1423,7 +1423,6 @@ func (s *Service) listByTag(ctx context.Context, tag 
 			names = append(names, name)
 		}
 	})
-
 	if err != nil {
 		record.Eventf(s.scope.InfraCluster(), "FailedListELBsByTag", "Failed to list %s ELB by Tags: %v", s.scope.Name(), err)
 		return nil, errors.Wrapf(err, "failed to list %s ELBs by tag group", s.scope.Name())
diff --git a/pkg/cloud/services/elb/loadbalancer_test.go b/pkg/cloud/services/elb/loadbalancer_test.go
index c29096cdeb..44f20819d2 100644
--- a/pkg/cloud/services/elb/loadbalancer_test.go
+++ b/pkg/cloud/services/elb/loadbalancer_test.go
@@ -49,7 +49,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/test/mocks"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 var stubInfraV1TargetGroupSpecAPI = infrav1.TargetGroupSpec{
@@ -2966,11 +2966,11 @@ func TestDeleteAPIServerELB(t *testing.T) {
 				}).Return(nil, &elbtypes.AccessPointNotFoundException{})
 			},
 			verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) {
-				loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
+				loadBalancerConditionReady := v1beta1conditions.IsTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
 				if loadBalancerConditionReady {
 					t.Fatalf("Expected LoadBalancerReady condition to be False, but was True")
 				}
-				loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
+				loadBalancerConditionReason := v1beta1conditions.GetReason(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
 				if loadBalancerConditionReason != clusterv1beta1.DeletedReason {
 					t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason)
 				}
@@ -3015,11 +3015,11 @@ func TestDeleteAPIServerELB(t *testing.T) {
 				)
 			},
 			verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) {
-				loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
+				loadBalancerConditionReady := v1beta1conditions.IsTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
 				if loadBalancerConditionReady {
 					t.Fatalf("Expected LoadBalancerReady condition to be False, but was True")
 				}
-				loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
+				loadBalancerConditionReason := v1beta1conditions.GetReason(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
 				if loadBalancerConditionReason != clusterv1beta1.DeletedReason {
 					t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason)
 				}
@@ -3077,11 +3077,11 @@ func TestDeleteAPIServerELB(t *testing.T) {
 				)
 			},
 			verifyAWSCluster: func(awsCluster *infrav1.AWSCluster) {
-				loadBalancerConditionReady := conditions.IsTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
+				loadBalancerConditionReady := v1beta1conditions.IsTrue(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
 				if loadBalancerConditionReady {
 					t.Fatalf("Expected LoadBalancerReady condition to be False, but was True")
 				}
-				loadBalancerConditionReason := conditions.GetReason(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
+				loadBalancerConditionReason := v1beta1conditions.GetReason(awsCluster, infrav1beta1.LoadBalancerReadyCondition)
 				if loadBalancerConditionReason != clusterv1beta1.DeletedReason {
 					t.Fatalf("Expected LoadBalancerReady condition reason to be Deleted, but was %s", loadBalancerConditionReason)
 				}
diff --git a/pkg/cloud/services/network/carriergateways.go b/pkg/cloud/services/network/carriergateways.go
index 2876344b75..b401980aaa 100644
--- a/pkg/cloud/services/network/carriergateways.go
+++ b/pkg/cloud/services/network/carriergateways.go
@@ -33,7 +33,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 func (s *Service) reconcileCarrierGateway() error {
@@ -78,7 +78,7 @@ func (s *Service) reconcileCarrierGateway() error {
 			record.Warnf(s.scope.InfraCluster(), "FailedTagCarrierGateway", "Failed to tag managed Carrier Gateway %q: %v", cagw.CarrierGatewayId, err)
 			return errors.Wrapf(err, "failed to tag carrier gateway %q", *cagw.CarrierGatewayId)
 		}
-		conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition)
+		v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition)
 		return nil
 	}
diff --git a/pkg/cloud/services/network/egress_only_gateways.go b/pkg/cloud/services/network/egress_only_gateways.go
index 24a411a1e4..b25877bde2 100644
--- a/pkg/cloud/services/network/egress_only_gateways.go
+++ b/pkg/cloud/services/network/egress_only_gateways.go
@@ -34,7 +34,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 func (s *Service) reconcileEgressOnlyInternetGateways() error {
@@ -80,7 +80,7 @@ func (s *Service) reconcileEgressOnlyInternetGateways() error {
 			record.Warnf(s.scope.InfraCluster(), "FailedTagEgressOnlyInternetGateway", "Failed to tag managed Egress Only Internet Gateway %q: %v", gateway.EgressOnlyInternetGatewayId, err)
 			return errors.Wrapf(err, "failed to tag egress only internet gateway %q", *gateway.EgressOnlyInternetGatewayId)
 		}
-		conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition)
+		v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition)
 		return nil
 	}
diff --git a/pkg/cloud/services/network/gateways.go b/pkg/cloud/services/network/gateways.go
index 4b06b80fa4..7d991df5ff 100644
--- a/pkg/cloud/services/network/gateways.go
+++ b/pkg/cloud/services/network/gateways.go
@@ -34,7 +34,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 func (s *Service) reconcileInternetGateways() error {
@@ -75,7 +75,7 @@ func (s *Service) reconcileInternetGateways() error {
 			record.Warnf(s.scope.InfraCluster(), "FailedTagInternetGateway", "Failed to tag managed Internet Gateway %q: %v", gateway.InternetGatewayId, err)
 			return errors.Wrapf(err, "failed to tag internet gateway %q", *gateway.InternetGatewayId)
 		}
-		conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition)
+		v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition)
 		return nil
 	}
diff --git a/pkg/cloud/services/network/natgateways.go b/pkg/cloud/services/network/natgateways.go
index 109f2f2b23..59cb95b8d2 100644
--- a/pkg/cloud/services/network/natgateways.go
+++ b/pkg/cloud/services/network/natgateways.go
@@ -38,7 +38,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 func (s *Service) reconcileNatGateways() error {
@@ -55,7 +55,7 @@ func (s *Service) reconcileNatGateways() error {
 
 	if len(s.scope.Subnets().FilterPrivate().FilterNonCni()) == 0 {
 		s.scope.Debug("No private subnets available, skipping NAT gateways")
-		conditions.MarkFalse(
+		v1beta1conditions.MarkFalse(
 			s.scope.InfraCluster(),
 			infrav1beta1.NatGatewaysReadyCondition,
 			infrav1beta1.NatGatewaysReconciliationFailedReason,
@@ -64,7 +64,7 @@ func (s *Service) reconcileNatGateways() error {
 		return nil
 	} else if len(s.scope.Subnets().FilterPublic().FilterNonCni()) == 0 {
 		s.scope.Debug("No public subnets available. Cannot create NAT gateways for private subnets, this might be a configuration error.")
-		conditions.MarkFalse(
+		v1beta1conditions.MarkFalse(
 			s.scope.InfraCluster(),
 			infrav1beta1.NatGatewaysReadyCondition,
 			infrav1beta1.NatGatewaysReconciliationFailedReason,
@@ -81,8 +81,8 @@ func (s *Service) reconcileNatGateways() error {
 	// Batch the creation of NAT gateways
 	if len(subnetIDs) > 0 {
 		// set NatGatewayCreationStarted if the condition has never been set before
-		if !conditions.Has(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition) {
-			conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, infrav1beta1.NatGatewaysCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "")
+		if !v1beta1conditions.Has(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition) {
+			v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, infrav1beta1.NatGatewaysCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "")
 			if err := s.scope.PatchObject(); err != nil {
 				return errors.Wrap(err, "failed to patch conditions")
 			}
@@ -101,7 +101,7 @@ func (s *Service) reconcileNatGateways() error {
 		if err != nil {
 			return err
 		}
-		conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition)
+		v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition)
 	}
 
 	return nil
diff --git a/pkg/cloud/services/network/network.go b/pkg/cloud/services/network/network.go
index 4bc9c95dc3..32f3f3da3e 100644
--- a/pkg/cloud/services/network/network.go
+++ b/pkg/cloud/services/network/network.go
@@ -24,7 +24,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/awserrors"
 	infrautilconditions "sigs.k8s.io/cluster-api-provider-aws/v2/util/conditions"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 // ReconcileNetwork reconciles the network of the given cluster.
@@ -33,66 +33,66 @@ func (s *Service) ReconcileNetwork() (err error) {
 	// VPC.
 	if err := s.reconcileVPC(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, infrav1beta1.VpcReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, infrav1beta1.VpcReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition)
 
 	// Secondary CIDRs
 	if err := s.associateSecondaryCidrs(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, infrav1beta1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, infrav1beta1.SecondaryCidrReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition)
 
 	// Subnets.
 	if err := s.reconcileSubnets(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, infrav1beta1.SubnetsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, infrav1beta1.SubnetsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition)
 
 	// Internet Gateways.
 	if err := s.reconcileInternetGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, infrav1beta1.InternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, infrav1beta1.InternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition)
 
 	// Carrier Gateway.
 	if err := s.reconcileCarrierGateway(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition, infrav1beta1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition, infrav1beta1.CarrierGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition)
 
 	// Egress Only Internet Gateways.
 	if err := s.reconcileEgressOnlyInternetGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, infrav1beta1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, infrav1beta1.EgressOnlyInternetGatewayFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition)
 
 	// NAT Gateways.
 	if err := s.reconcileNatGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, infrav1beta1.NatGatewaysReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, infrav1beta1.NatGatewaysReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition)
 
 	// Routing tables.
 	if err := s.reconcileRouteTables(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, infrav1beta1.RouteTableReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, infrav1beta1.RouteTableReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition)
 
 	// VPC Endpoints.
 	if err := s.reconcileVPCEndpoints(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, infrav1beta1.VpcEndpointsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, infrav1beta1.VpcEndpointsReconciliationFailedReason, infrautilconditions.ErrorConditionAfterInit(s.scope.ClusterObj()), "%s", err.Error())
 		return err
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition)
 
 	s.scope.Debug("Reconcile network completed successfully")
 	return nil
@@ -121,40 +121,40 @@ func (s *Service) DeleteNetwork() (err error) {
 	vpc.DeepCopyInto(s.scope.VPC())
 
 	// VPC Endpoints.
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	if err := s.scope.PatchObject(); err != nil {
 		return err
 	}
 
 	if err := s.deleteVPCEndpoints(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcEndpointsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 
 	// Routing tables.
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	if err := s.scope.PatchObject(); err != nil {
 		return err
 	}
 
 	if err := s.deleteRouteTables(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 
 	// NAT Gateways.
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	if err := s.scope.PatchObject(); err != nil {
 		return err
 	}
 
 	if err := s.deleteNatGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.NatGatewaysReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 
 	// EIPs.
 	if err := s.releaseAddresses(); err != nil {
@@ -162,68 +162,68 @@ func (s *Service) DeleteNetwork() (err error) {
 	}
 
 	// Internet Gateways.
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	if err := s.scope.PatchObject(); err != nil {
 		return err
 	}
 
 	if err := s.deleteInternetGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.InternetGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 
 	// Carrier Gateway.
 	if s.scope.VPC().CarrierGatewayID != nil {
 		if err := s.deleteCarrierGateway(); err != nil {
-			conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+			v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 			return err
 		}
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.CarrierGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 	}
 
 	// Egress Only Internet Gateways.
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	if err := s.scope.PatchObject(); err != nil {
 		return err
 	}
 
 	if err := s.deleteEgressOnlyInternetGateways(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.EgressOnlyInternetGatewayReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 
 	// Subnets.
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	if err := s.scope.PatchObject(); err != nil {
 		return err
 	}
 
 	if err := s.deleteSubnets(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 
 	// Secondary CIDR.
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	if err := s.disassociateSecondaryCidrs(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.SecondaryCidrsReadyCondition, "DisassociateFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
 
 	// VPC.
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	if err := s.scope.PatchObject(); err != nil {
 		return err
 	}
 
 	if err := s.deleteVPC(); err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 
 	s.scope.Debug("Delete network completed successfully")
 	return nil
diff --git a/pkg/cloud/services/network/routetables.go b/pkg/cloud/services/network/routetables.go
index e438ac7338..ff11dccf7b 100644
--- a/pkg/cloud/services/network/routetables.go
+++ b/pkg/cloud/services/network/routetables.go
@@ -34,7 +34,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/services/wait"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 const (
@@ -123,7 +123,7 @@ func (s *Service) reconcileRouteTables() error {
 		s.scope.Debug("Subnet has been associated with route table", "subnet-id", sn.GetResourceID(), "route-table-id", rt.ID)
 		sn.RouteTableID = aws.String(rt.ID)
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.RouteTablesReadyCondition)
 
 	return nil
 }
@@ -266,7 +266,8 @@ func (s *Service) createRouteTableWithRoutes(routes []*ec2.CreateRouteInput, isP
 	out, err := s.EC2Client.CreateRouteTable(context.TODO(), &ec2.CreateRouteTableInput{
 		VpcId: aws.String(s.scope.VPC().ID),
 		TagSpecifications: []types.TagSpecification{
-			tags.BuildParamsToTagSpecification(types.ResourceTypeRouteTable, s.getRouteTableTagParams(services.TemporaryResourceID, isPublic, zone))},
+			tags.BuildParamsToTagSpecification(types.ResourceTypeRouteTable, s.getRouteTableTagParams(services.TemporaryResourceID, isPublic, zone)),
+		},
 	})
 	if err != nil {
 		record.Warnf(s.scope.InfraCluster(), "FailedCreateRouteTable", "Failed to create managed RouteTable: %v", err)
diff --git a/pkg/cloud/services/network/subnets.go b/pkg/cloud/services/network/subnets.go
index eefff53c3f..dae83ae888 100644
--- a/pkg/cloud/services/network/subnets.go
+++ b/pkg/cloud/services/network/subnets.go
@@ -40,7 +40,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/cloud/tags"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/internal/cidr"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 const (
@@ -206,7 +206,7 @@ func (s *Service) reconcileSubnets() error {
 	}
 
 	s.scope.Debug("Reconciled subnets", "subnets", subnets)
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.SubnetsReadyCondition)
 
 	return nil
 }
diff --git a/pkg/cloud/services/network/vpc.go b/pkg/cloud/services/network/vpc.go
index e8fe82ec3d..64590d67cc 100644
--- a/pkg/cloud/services/network/vpc.go
+++ b/pkg/cloud/services/network/vpc.go
@@ -38,7 +38,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 const (
@@ -138,8 +138,8 @@ func (s *Service) reconcileVPC() error {
 	s.scope.VPC().Tags = vpc.Tags
 	s.scope.VPC().ID = vpc.ID
 
-	if !conditions.Has(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition) {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, infrav1beta1.VpcCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "")
+	if !v1beta1conditions.Has(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition) {
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.VpcReadyCondition, infrav1beta1.VpcCreationStartedReason, clusterv1beta1.ConditionSeverityInfo, "")
 		if err := s.scope.PatchObject(); err != nil {
 			return errors.Wrap(err, "failed to patch conditions")
 		}
diff --git a/pkg/cloud/services/securitygroup/securitygroups.go b/pkg/cloud/services/securitygroup/securitygroups.go
index 1323faeb6f..1213cf1828 100644
--- a/pkg/cloud/services/securitygroup/securitygroups.go
+++ b/pkg/cloud/services/securitygroup/securitygroups.go
@@ -40,7 +40,7 @@ import (
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/record"
 	"sigs.k8s.io/cluster-api-provider-aws/v2/pkg/utils"
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 const (
@@ -198,7 +198,7 @@ func (s *Service) ReconcileSecurityGroups() error {
 			s.scope.Debug("Authorized ingress rules in security group", "authorized-ingress-rules", toAuthorize, "security-group-id", sg.ID)
 		}
 	}
-	conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition)
+	v1beta1conditions.MarkTrue(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition)
 
 	return nil
 }
@@ -309,7 +309,7 @@ func (s *Service) ec2SecurityGroupToSecurityGroup(ec2SecurityGroup types.Securit
 func (s *Service) DeleteSecurityGroups() error {
 	if s.scope.VPC().ID == "" {
 		s.scope.Debug("Skipping security group deletion, vpc-id is nil", "vpc-id", s.scope.VPC().ID)
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 		return nil
 	}
 
@@ -323,7 +323,7 @@ func (s *Service) DeleteSecurityGroups() error {
 		return nil
 	}
 
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletingReason, clusterv1beta1.ConditionSeverityInfo, "")
 	if err := s.scope.PatchObject(); err != nil {
 		return err
 	}
@@ -332,7 +332,7 @@ func (s *Service) DeleteSecurityGroups() error {
 		sg := clusterGroups[i]
 		current := sg.IngressRules
 		if err := s.revokeAllSecurityGroupIngressRules(sg.ID); awserrors.IsIgnorableSecurityGroupError(err) != nil { //nolint:gocritic
-			conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+			v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 			return err
 		}
 
@@ -344,10 +344,10 @@ func (s *Service) DeleteSecurityGroups() error {
 	}
 
 	if err != nil {
-		conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
+		v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, "DeletingFailed", clusterv1beta1.ConditionSeverityWarning, "%s", err.Error())
 		return err
 	}
-	conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
+	v1beta1conditions.MarkFalse(s.scope.InfraCluster(), infrav1beta1.ClusterSecurityGroupsReadyCondition, clusterv1beta1.DeletedReason, clusterv1beta1.ConditionSeverityInfo, "")
 
 	return nil
 }
diff --git a/test/e2e/suites/unmanaged/helpers_test.go b/test/e2e/suites/unmanaged/helpers_test.go
index 5f88d4219d..fd5f46896f 100644
--- a/test/e2e/suites/unmanaged/helpers_test.go
+++ b/test/e2e/suites/unmanaged/helpers_test.go
@@ -56,7 +56,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 // GetClusterByName returns a Cluster object given his name.
@@ -424,7 +424,7 @@ func hasAWSClusterConditions(m *infrav1.AWSCluster, expected []conditionAssertio
 		return false
 	}
 	for _, c := range expected {
-		actual := conditions.Get(m, c.conditionType)
+		actual := v1beta1conditions.Get(m, c.conditionType)
 		if actual == nil {
 			return false
 		}
diff --git a/test/e2e/suites/unmanaged/unmanaged_functional_test.go b/test/e2e/suites/unmanaged/unmanaged_functional_test.go
index 614c6ddbff..018a346287 100644
--- a/test/e2e/suites/unmanaged/unmanaged_functional_test.go
+++ b/test/e2e/suites/unmanaged/unmanaged_functional_test.go
@@ -44,7 +44,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/test/framework"
 	"sigs.k8s.io/cluster-api/test/framework/clusterctl"
 	"sigs.k8s.io/cluster-api/util"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 )
 
 const TestSvc = "test-svc-"
@@ -966,8 +966,8 @@ var _ = ginkgo.Context("[unmanaged] [functional]", func() {
 						return true
 					}
 					Expect(err).To(BeNil())
-					return conditions.IsFalse(awsCluster, infrav1.VpcEndpointsReadyCondition) &&
-						conditions.GetReason(awsCluster, infrav1.VpcEndpointsReadyCondition) == clusterv1beta1.DeletedReason
+					return v1beta1conditions.IsFalse(awsCluster, infrav1.VpcEndpointsReadyCondition) &&
+						v1beta1conditions.GetReason(awsCluster, infrav1.VpcEndpointsReadyCondition) == clusterv1beta1.DeletedReason
 				}, e2eCtx.E2EConfig.GetIntervals("", "wait-delete-cluster")...).Should(BeTrue(),
 					"Eventually failed waiting for AWSCluster to show VPC endpoint as deleted in conditions")
 			})
diff --git a/util/paused/paused.go b/util/paused/paused.go
index 8390f2c005..74a891e3c9 100644
--- a/util/paused/paused.go
+++ b/util/paused/paused.go
@@ -35,19 +35,19 @@ import (
 	clusterv1beta1 "sigs.k8s.io/cluster-api/api/core/v1beta1"
 	clusterv1 "sigs.k8s.io/cluster-api/api/core/v1beta2"
 	"sigs.k8s.io/cluster-api/util/annotations"
-	"sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
+	v1beta1conditions "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/conditions"
 	v1beta1patch "sigs.k8s.io/cluster-api/util/deprecated/v1beta1/patch"
 )
 
 // ConditionSetter combines the client.Object and Setter interface.
 type ConditionSetter interface {
-	conditions.Setter
+	v1beta1conditions.Setter
 	client.Object
 }
 
 // EnsurePausedCondition sets the paused condition on the object and returns if it should be considered as paused.
 func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, obj ConditionSetter) (isPaused bool, conditionChanged bool, err error) {
-	oldCondition := conditions.Get(obj, clusterv1beta1.PausedV1Beta2Condition)
+	oldCondition := v1beta1conditions.Get(obj, clusterv1beta1.PausedV1Beta2Condition)
 	newCondition := pausedCondition(c.Scheme(), cluster, obj, string(clusterv1beta1.PausedV1Beta2Condition))
 
 	isPaused = newCondition.Status == corev1.ConditionTrue
@@ -55,7 +55,7 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste
 	log := ctrl.LoggerFrom(ctx)
 
 	// Return early if the paused condition did not change.
-	if oldCondition != nil && conditions.HasSameState(oldCondition, &newCondition) {
+	if oldCondition != nil && v1beta1conditions.HasSameState(oldCondition, &newCondition) {
 		if isPaused {
 			log.V(6).Info("Reconciliation is paused for this object", "reason", newCondition.Message)
 		}
@@ -73,7 +73,7 @@ func EnsurePausedCondition(ctx context.Context, c client.Client, cluster *cluste
 		log.V(4).Info("Unpausing reconciliation for this object")
 	}
 
-	conditions.Set(obj, &newCondition)
+	v1beta1conditions.Set(obj, &newCondition)
 
 	if err := patchHelper.Patch(ctx, obj, v1beta1patch.WithOwnedConditions{Conditions: []clusterv1beta1.ConditionType{
 		clusterv1beta1.PausedV1Beta2Condition,