diff --git a/cmd/cloud-controller-manager/app/controllermanager.go b/cmd/cloud-controller-manager/app/controllermanager.go index 7786447298..19e704bad4 100644 --- a/cmd/cloud-controller-manager/app/controllermanager.go +++ b/cmd/cloud-controller-manager/app/controllermanager.go @@ -214,7 +214,7 @@ func RunWrapper(s *options.CloudControllerManagerOptions, c *cloudcontrollerconf logger := log.Background().WithName("RunWrapper") return func(ctx context.Context) { if !c.DynamicReloadingConfig.EnableDynamicReloading { - klog.V(1).Infof("using static initialization from config file %s", c.ComponentConfig.KubeCloudShared.CloudProvider.CloudConfigFile) + logger.V(1).Info("using static initialization from config file", "cloudConfigFile", c.ComponentConfig.KubeCloudShared.CloudProvider.CloudConfigFile) if err := Run(ctx, c.Complete(), h); err != nil { klog.Errorf("RunWrapper: failed to start cloud controller manager: %v", err) os.Exit(1) @@ -224,10 +224,10 @@ func RunWrapper(s *options.CloudControllerManagerOptions, c *cloudcontrollerconf cloudConfigFile := c.ComponentConfig.KubeCloudShared.CloudProvider.CloudConfigFile if cloudConfigFile != "" { - klog.V(1).Infof("RunWrapper: using dynamic initialization from config file %s, starting the file watcher", cloudConfigFile) + logger.V(1).Info("using dynamic initialization from config file, starting the file watcher", "cloudConfigFile", cloudConfigFile) updateCh = dynamic.RunFileWatcherOrDie(cloudConfigFile) } else { - klog.V(1).Infof("RunWrapper: using dynamic initialization from secret %s/%s, starting the secret watcher", c.DynamicReloadingConfig.CloudConfigSecretNamespace, c.DynamicReloadingConfig.CloudConfigSecretName) + logger.V(1).Info("using dynamic initialization from secret, starting the secret watcher", "namespace", c.DynamicReloadingConfig.CloudConfigSecretNamespace, "name", c.DynamicReloadingConfig.CloudConfigSecretName) updateCh = dynamic.RunSecretWatcherOrDie(c) } @@ -236,7 +236,7 @@ func RunWrapper(s *options.CloudControllerManagerOptions, c *cloudcontrollerconf for { select { case <-updateCh: - klog.V(2).Info("RunWrapper: detected the cloud config has been updated, re-constructing the cloud controller manager") + logger.V(2).Info("detected the cloud config has been updated, re-constructing the cloud controller manager") // stop the previous goroutines cancelFunc() @@ -289,6 +289,7 @@ func shouldDisableCloudProvider(configFilePath string) (bool, error) { } func runAsync(s *options.CloudControllerManagerOptions, errCh chan error, h *controllerhealthz.MutableHealthzHandler) context.CancelFunc { + logger := log.Background().WithName("runAsync") ctx, cancelFunc := context.WithCancel(context.Background()) go func() { @@ -303,7 +304,7 @@ func runAsync(s *options.CloudControllerManagerOptions, errCh chan error, h *con errCh <- err } - klog.V(1).Infof("RunAsync: stopping") + logger.V(1).Info("stopping") }() return cancelFunc @@ -415,7 +416,7 @@ func startControllers(ctx context.Context, controllerContext genericcontrollerma continue } - klog.V(1).Infof("Starting %q", controllerName) + logger.V(1).Info("Starting controller", "controller", controllerName) ctrl, started, err := initFn(ctx, controllerContext, completedConfig, cloud) if err != nil { klog.Errorf("Error starting %q: %s", controllerName, err.Error()) @@ -448,11 +449,11 @@ func startControllers(ctx context.Context, controllerContext genericcontrollerma klog.Fatalf("Failed to wait for apiserver being healthy: %v", err) } - klog.V(2).Infof("startControllers: starting shared informers") + 
logger.V(2).Info("startControllers: starting shared informers") completedConfig.SharedInformers.Start(ctx.Done()) controllerContext.InformerFactory.Start(ctx.Done()) <-ctx.Done() - klog.V(1).Infof("startControllers: received stopping signal, exiting") + logger.V(1).Info("startControllers: received stopping signal, exiting") return nil } diff --git a/cmd/cloud-controller-manager/app/dynamic/secret_watcher.go b/cmd/cloud-controller-manager/app/dynamic/secret_watcher.go index 1bab3becdf..d37010f155 100644 --- a/cmd/cloud-controller-manager/app/dynamic/secret_watcher.go +++ b/cmd/cloud-controller-manager/app/dynamic/secret_watcher.go @@ -30,6 +30,7 @@ import ( cloudcontrollerconfig "sigs.k8s.io/cloud-provider-azure/cmd/cloud-controller-manager/app/config" "sigs.k8s.io/cloud-provider-azure/cmd/cloud-controller-manager/app/options" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) type SecretWatcher struct { @@ -64,6 +65,7 @@ func RunSecretWatcherOrDie(c *cloudcontrollerconfig.Config) chan struct{} { // NewSecretWatcher creates a SecretWatcher and a signal channel to indicate // the specific secret has been updated func NewSecretWatcher(informerFactory informers.SharedInformerFactory, secretName, secretNamespace string) (*SecretWatcher, chan struct{}) { + logger := log.Background().WithName("NewSecretWatcher") secretInformer := informerFactory.Core().V1().Secrets() updateSignal := make(chan struct{}) @@ -79,7 +81,7 @@ func NewSecretWatcher(informerFactory informers.SharedInformerFactory, secretNam if strings.EqualFold(newSecret.Name, secretName) && strings.EqualFold(newSecret.Namespace, secretNamespace) { - klog.V(1).Infof("secret %s updated, sending the signal", newSecret.Name) + logger.V(1).Info("secret updated, sending the signal", "secretName", newSecret.Name) updateSignal <- struct{}{} } }, diff --git a/cmd/cloud-node-manager/app/nodemanager.go b/cmd/cloud-node-manager/app/nodemanager.go index 6dd060e326..820e03930d 100644 --- a/cmd/cloud-node-manager/app/nodemanager.go +++ b/cmd/cloud-node-manager/app/nodemanager.go @@ -125,7 +125,7 @@ func Run(ctx context.Context, c *cloudnodeconfig.Config) error { // startControllers starts the cloud specific controller loops. 
func startControllers(ctx context.Context, c *cloudnodeconfig.Config, healthzHandler *controllerhealthz.MutableHealthzHandler) error { logger := log.Background().WithName("startControllers") - klog.V(1).Infof("Starting cloud-node-manager...") + logger.V(1).Info("Starting cloud-node-manager...") // Start the CloudNodeController nodeController := nodemanager.NewCloudNodeController( diff --git a/health-probe-proxy/go.mod b/health-probe-proxy/go.mod index 9c0e97aeeb..f81155a733 100644 --- a/health-probe-proxy/go.mod +++ b/health-probe-proxy/go.mod @@ -12,9 +12,9 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/go-logr/logr v1.4.2 // indirect + github.com/go-logr/logr v1.4.3 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -22,25 +22,26 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_golang v1.22.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.23.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/spf13/cobra v1.9.1 // indirect - github.com/spf13/pflag v1.0.6 // indirect + github.com/spf13/pflag v1.0.7 // indirect github.com/x448/float16 v0.8.4 // indirect - go.opentelemetry.io/otel v1.35.0 // indirect - go.opentelemetry.io/otel/trace v1.35.0 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - golang.org/x/net v0.39.0 // indirect - golang.org/x/sys v0.32.0 // indirect - golang.org/x/text v0.24.0 // indirect - google.golang.org/protobuf v1.36.5 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sys v0.35.0 // indirect + golang.org/x/text v0.28.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/apimachinery v0.34.1 // indirect - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 // indirect - sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d // indirect + sigs.k8s.io/cloud-provider-azure v1.34.3 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/health-probe-proxy/go.sum b/health-probe-proxy/go.sum index 1056a198b8..e815df005c 100644 --- a/health-probe-proxy/go.sum +++ b/health-probe-proxy/go.sum @@ -8,10 +8,14 @@ github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6N github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= 
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -45,14 +49,24 @@ github.com/pires/go-proxyproto v0.8.1 h1:9KEixbdJfhrbtjpz/ZwCdWDD2Xem0NZ38qMYaAS github.com/pires/go-proxyproto v0.8.1/go.mod h1:ZKAAyp3cgy5Y5Mo4n9AlScrkCZwUy0g3Jf+slqQVcuU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -60,6 +74,8 @@ 
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= @@ -70,8 +86,12 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= @@ -89,6 +109,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -97,10 +119,14 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI= +golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= 
+golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -111,6 +137,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -126,8 +154,14 @@ k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/cloud-provider-azure v1.34.3 h1:dk+siFumvax/D5UCDeK9565wSA2w4wKXAm8vSt6Ifuw= +sigs.k8s.io/cloud-provider-azure v1.34.3/go.mod h1:5f72ArdFuG1iNgBOR15a7LXN+z65WRmP3Ua3e+3V0vU= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= diff --git a/health-probe-proxy/main.go b/health-probe-proxy/main.go index 42fe7d2a64..e8adec14bd 100644 --- a/health-probe-proxy/main.go +++ b/health-probe-proxy/main.go @@ -29,9 +29,11 @@ import ( "k8s.io/component-base/logs" "k8s.io/klog/v2" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) func main() { + logger := log.Background().WithName("main") logs.InitLogs() defer logs.FlushLogs() @@ -43,17 +45,17 @@ func main() { targetUrl, _ := url.Parse(fmt.Sprintf("http://localhost:%s", strconv.Itoa(targetPort))) proxy := httputil.NewSingleHostReverseProxy(targetUrl) - klog.Infof("target url: %s", targetUrl) + logger.Info("", "target url", targetUrl) http.Handle("/", proxy) - klog.Infof("proxying from port %d to port %d", healthCheckPort, targetPort) + 
logger.Info("proxying between ports", "from", healthCheckPort, "to", targetPort) listener, err := net.Listen("tcp", fmt.Sprintf("0.0.0.0:%s", strconv.Itoa(healthCheckPort))) if err != nil { klog.Errorf("failed to listen on port %d: %s", targetPort, err) panic(err) } - klog.Infof("listening on port %d", healthCheckPort) + logger.Info("listening on port", "port", healthCheckPort) proxyListener := &proxyproto.Listener{Listener: listener} defer func(proxyListener *proxyproto.Listener) { @@ -64,7 +66,7 @@ func main() { } }(proxyListener) - klog.Infof("listening on port with proxy listener %d", healthCheckPort) + logger.Info("listening on port with proxy listener", "port", healthCheckPort) err = http.Serve(proxyListener, nil) if err != nil { klog.Errorf("failed to serve: %s", err) diff --git a/kubetest2-aks/deployer/build.go b/kubetest2-aks/deployer/build.go index 6edea5c088..b2b60dd9d2 100644 --- a/kubetest2-aks/deployer/build.go +++ b/kubetest2-aks/deployer/build.go @@ -22,8 +22,8 @@ import ( git "github.com/go-git/go-git/v5" plumbing "github.com/go-git/go-git/v5/plumbing" - "k8s.io/klog" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/kubetest2/pkg/exec" ) @@ -87,7 +87,8 @@ func (d *deployer) makeCloudProviderImages(path string) (string, error) { // makeCloudProviderImagesByPath makes CCM or CNM images with repo path. func (d *deployer) makeCloudProviderImagesByPath() (string, error) { - klog.Infof("Making Cloud provider images with repo path") + logger := log.Background().WithName("makeCloudProviderImagesByPath") + logger.Info("Making Cloud provider images with repo path") path := d.TargetPath return d.makeCloudProviderImages(path) @@ -95,7 +96,8 @@ func (d *deployer) makeCloudProviderImagesByPath() (string, error) { // makeCloudProviderImagesByTag makes CCM or CNM images with repo refs. 
func (d *deployer) makeCloudProviderImagesByTag(url string) (string, error) { - klog.Infof("Making Cloud provider images with refs") + logger := log.Background().WithName("makeCloudProviderImagesByTag") + logger.Info("Making Cloud provider images with refs") ccmPath := fmt.Sprintf("%s/cloud-provider-azure", gitClonePath) repo, err := git.PlainClone(ccmPath, false, &git.CloneOptions{ @@ -118,6 +120,7 @@ func (d *deployer) makeCloudProviderImagesByTag(url string) (string, error) { } func (d *deployer) Build() error { + logger := log.Background().WithName("Build") err := d.verifyBuildFlags() if err != nil { return fmt.Errorf("failed to verify build flags: %v", err) @@ -134,7 +137,7 @@ func (d *deployer) Build() error { return fmt.Errorf("failed to make Cloud provider image with tag %q: %v", d.TargetTag, err) } } - klog.Infof("cloud-provider-azure image with tag %q are ready", imageTag) + logger.Info("cloud-provider-azure image are ready", "imageTag", imageTag) } return nil diff --git a/kubetest2-aks/deployer/down.go b/kubetest2-aks/deployer/down.go index 19df3b8f63..6742dd5044 100644 --- a/kubetest2-aks/deployer/down.go +++ b/kubetest2-aks/deployer/down.go @@ -23,10 +23,12 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/azidentity" "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/resources/armresources" "k8s.io/klog" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) func (d *deployer) deleteResourceGroup(subscriptionID string, credential azcore.TokenCredential) error { - klog.Infof("Deleting resource group %q", d.ResourceGroupName) + logger := log.Background().WithName("deleteResourceGroup") + logger.Info("Deleting resource group", "resourceGroup", d.ResourceGroupName) rgClient, _ := armresources.NewResourceGroupsClient(subscriptionID, credential, nil) poller, err := rgClient.BeginDelete(ctx, d.ResourceGroupName, nil) @@ -40,6 +42,7 @@ func (d *deployer) deleteResourceGroup(subscriptionID string, credential azcore. } func (d *deployer) Down() error { + logger := log.Background().WithName("Down") // Create a credentials object. cred, err := azidentity.NewDefaultAzureCredential(nil) if err != nil { @@ -51,6 +54,6 @@ func (d *deployer) Down() error { klog.Fatalf("failed to delete resource group %q: %v", d.ResourceGroupName, err) } - klog.Infof("Resource group %q deleted", d.ResourceGroupName) + logger.Info("Resource group deleted", "resourceGroup", d.ResourceGroupName) return nil } diff --git a/kubetest2-aks/deployer/up.go b/kubetest2-aks/deployer/up.go index 18ef6630d1..b2b69bcc2b 100644 --- a/kubetest2-aks/deployer/up.go +++ b/kubetest2-aks/deployer/up.go @@ -39,6 +39,7 @@ import ( "k8s.io/klog" "k8s.io/utils/ptr" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/kubetest2/pkg/exec" ) @@ -210,6 +211,7 @@ func (d *deployer) prepareCustomConfig() ([]byte, error) { // prepareClusterConfig generates cluster config. 
func (d *deployer) prepareClusterConfig(clusterID string) (*armcontainerservicev2.ManagedCluster, string, error) { + logger := log.Background().WithName("prepareClusterConfig") configFile, err := openPath(d.ConfigPath) if err != nil { return nil, "", fmt.Errorf("failed to read cluster config file at %q: %v", d.ConfigPath, err) @@ -230,12 +232,12 @@ func (d *deployer) prepareClusterConfig(clusterID string) (*armcontainerservicev return nil, "", fmt.Errorf("failed to prepare custom config: %v", err) } - klog.Infof("Customized configurations are: %s", string(customConfig)) + logger.Info("Customized configurations", "config", string(customConfig)) encodedCustomConfig := base64.StdEncoding.EncodeToString(customConfig) clusterConfig = strings.ReplaceAll(clusterConfig, "{CUSTOM_CONFIG}", encodedCustomConfig) - klog.Infof("AKS cluster config without credential: %s", clusterConfig) + logger.Info("AKS cluster config without credential", "config", clusterConfig) mcConfig := &armcontainerservicev2.ManagedCluster{} err = json.Unmarshal([]byte(clusterConfig), mcConfig) @@ -248,10 +250,11 @@ func (d *deployer) prepareClusterConfig(clusterID string) (*armcontainerservicev } func updateAzureCredential(mcConfig *armcontainerservicev2.ManagedCluster) { - klog.Infof("Updating Azure credentials to manage cluster resource group") + logger := log.Background().WithName("updateAzureCredential") + logger.Info("Updating Azure credentials to manage cluster resource group") if len(clientID) != 0 && len(clientSecret) != 0 { - klog.Infof("Service principal is used to manage cluster resource group") + logger.Info("Service principal is used to manage cluster resource group") // Reset `Identity` in case managed identity is defined in templates while service principal is used. mcConfig.Identity = nil mcConfig.Properties.ServicePrincipalProfile = &armcontainerservicev2.ManagedClusterServicePrincipalProfile{ @@ -262,7 +265,7 @@ func updateAzureCredential(mcConfig *armcontainerservicev2.ManagedCluster) { } // Managed identity is preferable over service principal and picked by default when creating an AKS cluster. // TODO(mainred): we can consider supporting user-assigned managed identity. - klog.Infof("System assigned managed identity is used to manage cluster resource group") + logger.Info("System assigned managed identity is used to manage cluster resource group") // Reset `ServicePrincipalProfile` in case service principal is defined in templates while managed identity is used. mcConfig.Properties.ServicePrincipalProfile = nil systemAssignedIdentity := armcontainerservicev2.ResourceIdentityTypeSystemAssigned @@ -273,7 +276,8 @@ func updateAzureCredential(mcConfig *armcontainerservicev2.ManagedCluster) { // createAKSWithCustomConfig creates an AKS cluster with custom configuration. 
func (d *deployer) createAKSWithCustomConfig() error { - klog.Infof("Creating the AKS cluster with custom config") + logger := log.Background().WithName("createAKSWithCustomConfig") + logger.Info("Creating the AKS cluster with custom config") clusterID := fmt.Sprintf("/subscriptions/%s/resourcegroups/%s/providers/Microsoft.ContainerService/managedClusters/%s", subscriptionID, d.ResourceGroupName, d.ClusterName) mcConfig, encodedCustomConfig, err := d.prepareClusterConfig(clusterID) @@ -307,13 +311,14 @@ func (d *deployer) createAKSWithCustomConfig() error { return fmt.Errorf("failed to put resource: %v", err.Error()) } - klog.Infof("An AKS cluster %q in resource group %q is created", d.ClusterName, d.ResourceGroupName) + logger.Info("An AKS cluster is created", "clusterName", d.ClusterName, "resourceGroup", d.ResourceGroupName) return nil } // getAKSKubeconfig gets kubeconfig of the AKS cluster and writes it to specific path. func (d *deployer) getAKSKubeconfig() error { - klog.Infof("Retrieving AKS cluster's kubeconfig") + logger := log.Background().WithName("getAKSKubeconfig") + logger.Info("Retrieving AKS cluster's kubeconfig") client, err := armcontainerservicev2.NewManagedClustersClient(subscriptionID, cred, nil) if err != nil { return fmt.Errorf("failed to new managed cluster client with sub ID %q: %v", subscriptionID, err) @@ -324,7 +329,7 @@ func (d *deployer) getAKSKubeconfig() error { resp, err = client.ListClusterUserCredentials(ctx, d.ResourceGroupName, d.ClusterName, nil) if err != nil { if strings.Contains(err.Error(), "404 Not Found") { - klog.Infof("failed to list cluster user credentials for 1 minute, retrying") + logger.Info("failed to list cluster user credentials for 1 minute, retrying") return false, nil } return false, fmt.Errorf("failed to list cluster user credentials with resource group name %q, cluster ID %q: %v", d.ResourceGroupName, d.ClusterName, err) @@ -349,7 +354,7 @@ func (d *deployer) getAKSKubeconfig() error { return fmt.Errorf("failed to write kubeconfig to %s", destPath) } - klog.Infof("Succeeded in getting kubeconfig of cluster %q in resource group %q", d.ClusterName, d.ResourceGroupName) + logger.Info("Succeeded in getting kubeconfig of cluster", "clusterName", d.ClusterName, "resourceGroup", d.ResourceGroupName) return nil } @@ -380,6 +385,7 @@ func (d *deployer) verifyUpFlags() error { } func (d *deployer) Up() error { + logger := log.Background().WithName("Up") if err := d.verifyUpFlags(); err != nil { return fmt.Errorf("up flags are invalid: %v", err) } @@ -389,7 +395,7 @@ func (d *deployer) Up() error { if err != nil { return fmt.Errorf("failed to create the resource group: %v", err) } - klog.Infof("Resource group %s created", *resourceGroup.ResourceGroup.ID) + logger.Info("Resource group created", "resourceGroupID", *resourceGroup.ResourceGroup.ID) // Create the AKS cluster if err := d.createAKSWithCustomConfig(); err != nil { diff --git a/kubetest2-aks/go.mod b/kubetest2-aks/go.mod index 1ef55c357e..967882db13 100644 --- a/kubetest2-aks/go.mod +++ b/kubetest2-aks/go.mod @@ -23,30 +23,57 @@ require ( github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect github.com/Microsoft/go-winio v0.6.2 // indirect github.com/ProtonMail/go-crypto v1.3.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.6.1 // indirect github.com/cyphar/filepath-securejoin v0.4.1 // indirect + github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emirpasic/gods v1.18.1 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect github.com/go-git/go-billy/v5 v5.6.2 // indirect github.com/go-logr/logr v1.4.3 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect github.com/kevinburke/ssh_config v1.4.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pjbgf/sha1cd v0.4.0 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.23.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.17.0 // indirect github.com/sergi/go-diff v1.4.0 // indirect github.com/skeema/knownhosts v1.3.1 // indirect github.com/spf13/cobra v1.9.1 // indirect + github.com/x448/float16 v0.8.4 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect + go.opentelemetry.io/otel v1.37.0 // indirect + go.opentelemetry.io/otel/trace v1.37.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect golang.org/x/crypto v0.45.0 // indirect golang.org/x/net v0.47.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/text v0.31.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/warnings.v0 v0.1.2 // indirect + k8s.io/component-base v0.34.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect + sigs.k8s.io/cloud-provider-azure v1.34.3 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/kubetest2-aks/go.sum b/kubetest2-aks/go.sum index 3d2d6ada85..08046c4c66 100644 --- a/kubetest2-aks/go.sum +++ b/kubetest2-aks/go.sum @@ -32,6 +32,13 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= 
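The Go hunks above (controllermanager.go, secret_watcher.go, nodemanager.go, health-probe-proxy/main.go, and the kubetest2-aks deployer) all follow the same migration: free-form klog.Infof / klog.V(n).Infof calls become a named logr.Logger obtained from the repository's log.Background() helper in sigs.k8s.io/cloud-provider-azure/pkg/log, logging a fixed message plus structured key/value pairs. The standalone sketch below only illustrates that pattern; it uses klog.Background() from k8s.io/klog/v2 (which also returns a logr.Logger) in place of the repository helper, and the logger name, config path, and error value are assumptions for illustration, not code from this patch.

// logdemo.go: minimal sketch of the klog -> structured logging migration.
// klog.Background() stands in for log.Background() from
// sigs.k8s.io/cloud-provider-azure/pkg/log; "RunWrapper" and the config
// path below are hypothetical values. Run with -v=1 to see the V(1) line.
package main

import (
	"errors"
	"flag"

	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	// Before: klog.V(1).Infof("using static initialization from config file %s", path)
	// After: a named logger, a constant message, and key/value pairs.
	logger := klog.Background().WithName("RunWrapper")
	cloudConfigFile := "/etc/kubernetes/azure.json" // assumed path, illustration only

	logger.V(1).Info("using static initialization from config file",
		"cloudConfigFile", cloudConfigFile)

	// Errors travel as a first-class argument instead of being formatted with %v.
	err := errors.New("example failure")
	logger.Error(err, "failed to start cloud controller manager")
}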
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0= github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -45,6 +52,8 @@ github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c= github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU= github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= @@ -58,24 +67,31 @@ github.com/go-git/go-git/v5 v5.16.4/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lo github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PWkxoFkQ= 
github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M= github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= @@ -85,6 +101,14 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/octago/sflags v0.3.1 h1:LW65z20iAQKteEyjsnnc+/lyoCUnIoRuAocggr6RB6A= github.com/octago/sflags v0.3.1/go.mod h1:hVUkbnYwMU9kZiZJyOAIVN56YiVMMPxgJ46kRZ19jh0= github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= @@ -97,6 +121,16 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod 
h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -112,18 +146,44 @@ github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -135,14 +195,27 @@ golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= 
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -151,11 +224,21 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/apimachinery v0.34.2 h1:zQ12Uk3eMHPxrsbUJgNF8bTauTVR2WgqJsTmwTE/NW4= k8s.io/apimachinery v0.34.2/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= +k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/cloud-provider-azure v1.34.3 h1:dk+siFumvax/D5UCDeK9565wSA2w4wKXAm8vSt6Ifuw= +sigs.k8s.io/cloud-provider-azure v1.34.3/go.mod h1:5f72ArdFuG1iNgBOR15a7LXN+z65WRmP3Ua3e+3V0vU= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= sigs.k8s.io/kubetest2 v0.0.0-20250820195306-f71fd4c1cc1a h1:wa7AhZEeL0IgQP1dgpOskoU/jc9KzNbQemu1lbmx8Q8= sigs.k8s.io/kubetest2 v0.0.0-20250820195306-f71fd4c1cc1a/go.mod h1:KCWjzDnj7tqUREqslYVb2qN/De3f2X7S+k/I3i4rbyA= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= diff --git a/pkg/azclient/go.mod b/pkg/azclient/go.mod index 8370140d86..6e68d64662 100644 --- a/pkg/azclient/go.mod +++ b/pkg/azclient/go.mod @@ -31,7 +31,7 @@ require ( golang.org/x/time v0.14.0 gopkg.in/dnaeon/go-vcr.v3 v3.2.0 k8s.io/klog/v2 v2.130.1 - k8s.io/utils v0.0.0-20241210054802-24370beab758 + k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d ) require ( @@ -40,21 +40,48 @@ require ( github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect github.com/Masterminds/semver/v3 v3.4.0 // indirect github.com/antlr4-go/antlr/v4 v4.13.1 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/blang/semver/v4 v4.0.0 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.3.0 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect + github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect github.com/kylelemons/godebug 
v1.1.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.23.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.65.0 // indirect + github.com/prometheus/procfs v0.17.0 // indirect + github.com/spf13/cobra v1.9.1 // indirect + github.com/spf13/pflag v1.0.7 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.yaml.in/yaml/v2 v2.4.2 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect + golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b // indirect golang.org/x/mod v0.29.0 // indirect golang.org/x/sys v0.38.0 // indirect golang.org/x/text v0.31.0 // indirect golang.org/x/tools v0.38.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apimachinery v0.34.0 // indirect + k8s.io/component-base v0.34.0 // indirect + sigs.k8s.io/cloud-provider-azure v1.34.3 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect ) diff --git a/pkg/azclient/go.sum b/pkg/azclient/go.sum index 514e85d2eb..8af72e6d72 100644 --- a/pkg/azclient/go.sum +++ b/pkg/azclient/go.sum @@ -50,10 +50,23 @@ github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1 github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/antlr4-go/antlr/v4 v4.13.1 h1:SqQKkuVZ+zWkMMNkjy5FZe5mr5WURWnlpmOuzYWrPrQ= github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmOABFgjdkM7Nw= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= 
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= @@ -70,18 +83,27 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU= github.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -92,6 +114,14 @@ github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent 
v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= @@ -100,8 +130,26 @@ github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmd github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.23.0 h1:ust4zpdl9r4trLY/gSjlm07PuiBq2ynaXXlptpfy8Uc= +github.com/prometheus/client_golang v1.23.0/go.mod h1:i/o0R9ByOnHX0McrTMTyhYvKE4haaf2mW08I+jGAjEE= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.65.0 h1:QDwzd+G1twt//Kwj/Ww6E9FQq1iVMmODnILtW1t2VzE= +github.com/prometheus/common v0.65.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= +github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0= +github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo= +github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0= +github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.7 h1:vN6T9TfwStFPFM5XzjsvmzZkLuaLX+HS+0SeFLRgU6M= +github.com/spf13/pflag v1.0.7/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= @@ -112,6 +160,10 @@ github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= github.com/tidwall/pretty v1.2.1/go.mod 
h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= @@ -122,39 +174,86 @@ go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJr go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y= go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU= +go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= +go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM= golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= +golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b h1:DXr+pvt3nC887026GRP39Ej11UATqWDmWuS99x26cD0= +golang.org/x/exp v0.0.0-20250819193227-8b4c13bb791b/go.mod h1:4QTo5u+SEIbbKW1RacMZq1YEfOBqeXa19JeshGi+zc4= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync 
v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A= google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/dnaeon/go-vcr.v3 v3.2.0 h1:Rltp0Vf+Aq0u4rQXgmXgtgoRDStTnFN83cWgSGSoRzM= gopkg.in/dnaeon/go-vcr.v3 v3.2.0/go.mod h1:2IMOnnlx9I6u9x+YBsM3tAMx6AlOxnJ0pWxQAzZ79Ag= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 
v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/apimachinery v0.34.0 h1:eR1WO5fo0HyoQZt1wdISpFDffnWOvFLOOeJ7MgIv4z0= +k8s.io/apimachinery v0.34.0/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= +k8s.io/component-base v0.34.0 h1:bS8Ua3zlJzapklsB1dZgjEJuJEeHjj8yTu1gxE2zQX8= +k8s.io/component-base v0.34.0/go.mod h1:RSCqUdvIjjrEm81epPcjQ/DS+49fADvGSCkIP3IC6vg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d h1:wAhiDyZ4Tdtt7e46e9M5ZSAJ/MnPGPs+Ki1gHw4w1R0= +k8s.io/utils v0.0.0-20250820121507-0af2bda4dd1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/cloud-provider-azure v1.34.3 h1:dk+siFumvax/D5UCDeK9565wSA2w4wKXAm8vSt6Ifuw= +sigs.k8s.io/cloud-provider-azure v1.34.3/go.mod h1:5f72ArdFuG1iNgBOR15a7LXN+z65WRmP3Ua3e+3V0vU= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= +sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= diff --git a/pkg/azclient/policy/retryaftermin/retryaftermin.go b/pkg/azclient/policy/retryaftermin/retryaftermin.go index b309e73d08..62608d50cd 100644 --- a/pkg/azclient/policy/retryaftermin/retryaftermin.go +++ b/pkg/azclient/policy/retryaftermin/retryaftermin.go @@ -23,7 +23,8 @@ import ( "time" "github.com/Azure/azure-sdk-for-go/sdk/azcore/policy" - "k8s.io/klog/v2" + + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) // Policy is a policy that enforces a minimum retry-after value @@ -45,6 +46,7 @@ func (p *Policy) GetMinRetryAfter() time.Duration { // Do implements the policy.Policy interface func (p *Policy) Do(req *policy.Request) (*http.Response, error) { + logger := log.Background().WithName("Do") resp, err := req.Next() // If the request failed or the status code is >= 300, return if err != nil || resp == nil || resp.StatusCode >= 300 { @@ -54,7 +56,7 @@ func (p *Policy) Do(req *policy.Request) (*http.Response, error) { // Check if the response retry-after header is less than the minimum overrideRetryAfter := func(header http.Header, headerName string, retryAfter time.Duration) { if retryAfter < p.minRetryAfter { - klog.V(5).Infof("RetryAfterMinPolicy: retry-after value %s is less than minimum %s, removing retry-after header..", retryAfter, p.minRetryAfter) + logger.V(5).Info("RetryAfterMinPolicy: retry-after value is less than minimum, removing retry-after header", "retryAfter", retryAfter, "minimum", p.minRetryAfter) header.Del(headerName) } } @@ -76,7 +78,7 @@ func (p *Policy) Do(req *policy.Request) (*http.Response, error) { // If the retry-after value is less than the minimum, remove it overrideRetryAfter(resp.Header, headerName, retryDuration) } else { - klog.V(5).Infof("RetryAfterMinPolicy: not modifying %s header with unrecognized format: %s", headerName, retryAfter) + logger.V(5).Info("RetryAfterMinPolicy: not 
modifying header with unrecognized format", "headerName", headerName, "retryAfter", retryAfter) } } } diff --git a/pkg/credentialprovider/azure_acr_helper.go b/pkg/credentialprovider/azure_acr_helper.go index 048e902177..9ba287db5f 100644 --- a/pkg/credentialprovider/azure_acr_helper.go +++ b/pkg/credentialprovider/azure_acr_helper.go @@ -58,7 +58,8 @@ import ( "unicode" utilnet "k8s.io/apimachinery/pkg/util/net" - "k8s.io/klog/v2" + + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) const ( @@ -144,6 +145,7 @@ func performTokenExchange( directive *authDirective, tenant string, accessToken string) (string, error) { + logger := log.Background().WithName("performTokenExchange") var err error data := url.Values{ "service": []string{directive.service}, @@ -176,7 +178,7 @@ func performTokenExchange( if exchange.Header != nil { if correlationID, ok := exchange.Header["X-Ms-Correlation-Request-Id"]; ok { - klog.V(4).Infof("correlationID: %s", correlationID) + logger.V(4).Info("received correlation ID", "correlationID", correlationID) } } diff --git a/pkg/credentialprovider/azure_credentials.go b/pkg/credentialprovider/azure_credentials.go index 70137e52c8..b201efe492 100644 --- a/pkg/credentialprovider/azure_credentials.go +++ b/pkg/credentialprovider/azure_credentials.go @@ -25,6 +25,7 @@ import ( "sigs.k8s.io/cloud-provider-azure/pkg/azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/configloader" + "sigs.k8s.io/cloud-provider-azure/pkg/log" providerconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" "github.com/Azure/azure-sdk-for-go/sdk/azcore" @@ -69,6 +70,7 @@ type acrProvider struct { type getTokenCredentialFunc func(req *v1.CredentialProviderRequest, config *providerconfig.AzureClientConfig) (azcore.TokenCredential, error) func NewAcrProvider(req *v1.CredentialProviderRequest, registryMirrorStr string, configFile string) (CredentialProvider, error) { + logger := log.Background().WithName("NewAcrProvider") config, err := configloader.Load[providerconfig.AzureClientConfig](context.Background(), nil, &configloader.FileLoaderConfig{FilePath: configFile}) if err != nil { return nil, fmt.Errorf("failed to load config: %w", err) @@ -88,10 +90,10 @@ func NewAcrProvider(req *v1.CredentialProviderRequest, registryMirrorStr string, // kubelet is responsible for checking the service account token emptiness when service account token is enabled, and only when service account token provide is enabled, // service account token is set in the request, so we can safely check the service account token emptiness to decide which credential to use.
if len(req.ServiceAccountToken) != 0 { - klog.V(2).Infof("Using service account token to authenticate ACR for image %s", req.Image) + logger.V(2).Info("Using service account token to authenticate ACR", "image", req.Image) getTokenCredential = getServiceAccountTokenCredential } else { - klog.V(2).Infof("Using managed identity to authenticate ACR for image %s", req.Image) + logger.V(2).Info("Using managed identity to authenticate ACR", "image", req.Image) getTokenCredential = getManagedIdentityCredential } credential, err := getTokenCredential(req, config) @@ -138,10 +140,11 @@ func getManagedIdentityCredential(_ *v1.CredentialProviderRequest, config *provi } func getServiceAccountTokenCredential(req *v1.CredentialProviderRequest, config *providerconfig.AzureClientConfig) (azcore.TokenCredential, error) { + logger := log.Background().WithName("getServiceAccountTokenCredential") if len(req.ServiceAccountToken) == 0 { return nil, fmt.Errorf("kubernetes Service account token is not provided for image %s", req.Image) } - klog.V(2).Infof("Kubernetes Service account token is provided for image %s", req.Image) + logger.V(2).Info("Kubernetes Service account token is provided", "image", req.Image) clientOption, _, err := azclient.GetAzCoreClientOption(&config.ARMClientConfig) if err != nil { @@ -176,9 +179,10 @@ func getServiceAccountTokenCredential(req *v1.CredentialProviderRequest, config } func (a *acrProvider) GetCredentials(ctx context.Context, image string, _ []string) (*v1.CredentialProviderResponse, error) { + logger := log.Background().WithName("GetCredentials") targetloginServer, sourceloginServer := a.parseACRLoginServerFromImage(image) if targetloginServer == "" { - klog.V(2).Infof("image(%s) is not from ACR, return empty authentication", image) + logger.V(2).Info("image is not from ACR, return empty authentication", "image", image) return &v1.CredentialProviderResponse{ CacheKeyType: v1.RegistryPluginCacheKeyType, CacheDuration: &metav1.Duration{Duration: 0}, @@ -250,6 +254,7 @@ func (a *acrProvider) GetCredentials(ctx context.Context, image string, _ []stri // getFromACR gets credentials from ACR. 
func (a *acrProvider) getFromACR(ctx context.Context, loginServer string) (string, string, error) { + logger := log.Background().WithName("getFromACR") var armAccessToken azcore.AccessToken var err error if armAccessToken, err = a.credential.GetToken(ctx, policy.TokenRequestOptions{ @@ -261,14 +266,14 @@ func (a *acrProvider) getFromACR(ctx context.Context, loginServer string) (strin return "", "", err } - klog.V(4).Infof("discovering auth redirects for: %s", loginServer) + logger.V(4).Info("discovering auth redirects", "loginServer", loginServer) directive, err := receiveChallengeFromLoginServer(loginServer, "https") if err != nil { klog.Errorf("failed to receive challenge: %s", err) return "", "", err } - klog.V(4).Infof("exchanging an acr refresh_token") + logger.V(4).Info("exchanging an acr refresh_token") registryRefreshToken, err := performTokenExchange(directive, a.config.TenantID, armAccessToken.Token) if err != nil { klog.Errorf("failed to perform token exchange: %s", err) diff --git a/pkg/metrics/azure_metrics.go b/pkg/metrics/azure_metrics.go index e161c5c4b1..8f30260b46 100644 --- a/pkg/metrics/azure_metrics.go +++ b/pkg/metrics/azure_metrics.go @@ -22,9 +22,9 @@ import ( "k8s.io/component-base/metrics" "k8s.io/component-base/metrics/legacyregistry" - "k8s.io/klog/v2" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) var ( @@ -98,11 +98,12 @@ func (mc *MetricContext) ObserveOperationWithResult(isOperationSucceeded bool, l } func (mc *MetricContext) logLatency(logLevel int32, latency float64, additionalKeysAndValues ...interface{}) { + logger := log.Background().WithName("logLatency") keysAndValues := []interface{}{"latency_seconds", latency} for i, label := range metricLabels { keysAndValues = append(keysAndValues, label, mc.attributes[i]) } - klog.V(klog.Level(logLevel)).InfoS("Observed Request Latency", append(keysAndValues, additionalKeysAndValues...)...) + logger.V(int(logLevel)).Info("Observed Request Latency", append(keysAndValues, additionalKeysAndValues...)...) } // CountFailedOperation increase the number of failed operations diff --git a/pkg/nodeipam/ipam/cloud_cidr_allocator.go b/pkg/nodeipam/ipam/cloud_cidr_allocator.go index ff1e9b853e..6b42aedcf8 100644 --- a/pkg/nodeipam/ipam/cloud_cidr_allocator.go +++ b/pkg/nodeipam/ipam/cloud_cidr_allocator.go @@ -89,6 +89,7 @@ func NewCloudCIDRAllocator( allocatorParams CIDRAllocatorParams, nodeList *v1.NodeList, ) (CIDRAllocator, error) { + logger := log.Background().WithName("NewCloudCIDRAllocator") if client == nil { klog.Fatalf("kubeClient is nil when starting NodeController") } @@ -96,7 +97,7 @@ func NewCloudCIDRAllocator( eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) eventBroadcaster.StartStructuredLogging(0) - klog.V(0).Infof("Sending events to api server.") + logger.V(0).Info("Sending events to api server.") eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) az, ok := cloud.(*providerazure.Cloud) @@ -150,13 +151,13 @@ func NewCloudCIDRAllocator( if allocatorParams.ServiceCIDR != nil { filterOutServiceRange(ca.clusterCIDRs, ca.cidrSets, allocatorParams.ServiceCIDR) } else { - klog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.") + logger.V(0).Info("No Service CIDR provided. 
Skipping filtering out service addresses.") } if allocatorParams.SecondaryServiceCIDR != nil { filterOutServiceRange(ca.clusterCIDRs, ca.cidrSets, allocatorParams.SecondaryServiceCIDR) } else { - klog.V(0).Info("No Secondary Service CIDR provided. Skipping filtering out secondary service addresses.") + logger.V(0).Info("No Secondary Service CIDR provided. Skipping filtering out secondary service addresses.") } // mark the CIDRs on the existing nodes as used @@ -164,10 +165,10 @@ func NewCloudCIDRAllocator( for _, node := range nodeList.Items { node := node if len(node.Spec.PodCIDRs) == 0 { - klog.V(4).Infof("Node %v has no CIDR, ignoring", node.Name) + logger.V(4).Info("Node has no CIDR, ignoring", "nodeName", node.Name) continue } - klog.V(4).Infof("Node %v has CIDR %s, occupying it in CIDR map", node.Name, node.Spec.PodCIDR) + logger.V(4).Info("Node has CIDR, occupying it in CIDR map", "nodeName", node.Name, "podCIDR", node.Spec.PodCIDR) if err := ca.occupyCIDRs(&node); err != nil { // This will happen if: // 1. We find garbage in the podCIDRs field. Retrying is useless. @@ -196,7 +197,7 @@ func NewCloudCIDRAllocator( DeleteFunc: nodeutil.CreateDeleteNodeHandler(ca.ReleaseCIDR), }) - klog.V(0).Infof("Using cloud CIDR allocator (provider: %v)", cloud.ProviderName()) + logger.V(0).Info("Using cloud CIDR allocator", "provider", cloud.ProviderName()) return ca, nil } @@ -352,11 +353,12 @@ func (ca *cloudCIDRAllocator) occupyCIDRs(node *v1.Node) error { // function you have to make sure to update nodesInProcessing properly with the // disposition of the node when the work is done. func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { + logger := log.Background().WithName("AllocateOrOccupyCIDR") if node == nil || node.Spec.ProviderID == "" { return nil } if !ca.insertNodeToProcessing(node.Name) { - klog.V(2).InfoS("Node is already in a process of CIDR assignment", "node", klog.KObj(node)) + logger.V(2).Info("Node is already in a process of CIDR assignment", "node", klog.KObj(node)) return nil } @@ -399,13 +401,14 @@ func (ca *cloudCIDRAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { allocated.allocatedCIDRs[i] = podCIDR } - klog.V(4).Infof("Putting node %s into the work queue", node.Name) + logger.V(4).Info("Putting node into the work queue", "nodeName", node.Name) ca.nodeUpdateChannel <- allocated return nil } // updateCIDRsAllocation assigns CIDR to Node and sends an update to the API server. func (ca *cloudCIDRAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error { + logger := log.Background().WithName("updateCIDRsAllocation") var err error var node *v1.Node defer ca.removeNodeFromProcessing(data.nodeName) @@ -431,7 +434,7 @@ func (ca *cloudCIDRAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) erro } } if match { - klog.V(4).Infof("Node %v already has allocated CIDR %v. It matches the proposed one.", node.Name, data.allocatedCIDRs) + logger.V(4).Info("Node already has allocated CIDR. 
It matches the proposed one.", "nodeName", node.Name, "allocatedCIDRs", data.allocatedCIDRs) return nil } } @@ -483,6 +486,7 @@ func (ca *cloudCIDRAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) erro } func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error { + logger := log.Background().WithName("ReleaseCIDR") if node == nil || len(node.Spec.PodCIDRs) == 0 { return nil } @@ -497,7 +501,7 @@ func (ca *cloudCIDRAllocator) ReleaseCIDR(node *v1.Node) error { return fmt.Errorf("node:%s has an allocated cidr: %v at index:%v that does not exist in cluster cidrs configuration", node.Name, cidr, i) } - klog.V(4).Infof("release CIDR %s for node:%v", cidr, node.Name) + logger.V(4).Info("release CIDR for node", "cidr", cidr, "nodeName", node.Name) if err = ca.cidrSets[i].Release(podCIDR); err != nil { return fmt.Errorf("error when releasing CIDR %v: %w", cidr, err) } diff --git a/pkg/nodeipam/ipam/range_allocator.go b/pkg/nodeipam/ipam/range_allocator.go index f5fa8c5b87..e280d386e9 100644 --- a/pkg/nodeipam/ipam/range_allocator.go +++ b/pkg/nodeipam/ipam/range_allocator.go @@ -37,6 +37,7 @@ import ( "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/nodeipam/ipam/cidrset" nodeutil "sigs.k8s.io/cloud-provider-azure/pkg/util/controller/node" utilnode "sigs.k8s.io/cloud-provider-azure/pkg/util/node" @@ -74,6 +75,7 @@ type rangeAllocator struct { // Caller must ensure that ClusterCIDRs are semantically correct e.g (1 for non DualStack, 2 for DualStack etc..) // can initialize its CIDR map. NodeList is only nil in testing. func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.NodeInformer, allocatorParams CIDRAllocatorParams, nodeList *v1.NodeList) (CIDRAllocator, error) { + logger := log.Background().WithName("NewCIDRRangeAllocator") if client == nil { klog.Fatalf("kubeClient is nil when starting NodeController") } @@ -81,7 +83,7 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cidrAllocator"}) eventBroadcaster.StartStructuredLogging(0) - klog.V(0).Infof("Sending events to api server.") + logger.V(0).Info("Sending events to api server.") eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")}) // create a cidrSet for each cidr we operate on @@ -109,22 +111,22 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No if allocatorParams.ServiceCIDR != nil { filterOutServiceRange(ra.clusterCIDRs, ra.cidrSets, allocatorParams.ServiceCIDR) } else { - klog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.") + logger.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.") } if allocatorParams.SecondaryServiceCIDR != nil { filterOutServiceRange(ra.clusterCIDRs, ra.cidrSets, allocatorParams.SecondaryServiceCIDR) } else { - klog.V(0).Info("No Secondary Service CIDR provided. Skipping filtering out secondary service addresses.") + logger.V(0).Info("No Secondary Service CIDR provided. 
Skipping filtering out secondary service addresses.") } if nodeList != nil { for i, node := range nodeList.Items { if len(node.Spec.PodCIDRs) == 0 { - klog.V(4).Infof("Node %v has no CIDR, ignoring", node.Name) + logger.V(4).Info("Node has no CIDR, ignoring", "nodeName", node.Name) continue } - klog.V(4).Infof("Node %v has CIDR %s, occupying it in CIDR map", node.Name, node.Spec.PodCIDR) + logger.V(4).Info("Node has CIDR, occupying it in CIDR map", "nodeName", node.Name, "podCIDR", node.Spec.PodCIDR) if err := ra.occupyCIDRs(&nodeList.Items[i]); err != nil { // This will happen if: // 1. We find garbage in the podCIDRs field. Retrying is useless. @@ -173,10 +175,11 @@ func NewCIDRRangeAllocator(client clientset.Interface, nodeInformer informers.No } func (r *rangeAllocator) Run(ctx context.Context) { + logger := log.Background().WithName("Run") defer utilruntime.HandleCrash() - klog.Infof("Starting range CIDR allocator") - defer klog.Infof("Shutting down range CIDR allocator") + logger.Info("Starting range CIDR allocator") + defer logger.Info("Shutting down range CIDR allocator") if !cache.WaitForNamedCacheSync("cidrallocator", ctx.Done(), r.nodesSynced) { return @@ -252,11 +255,12 @@ func (r *rangeAllocator) occupyCIDRs(node *v1.Node) error { // function you have to make sure to update nodesInProcessing properly with the // disposition of the node when the work is done. func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { + logger := log.Background().WithName("AllocateOrOccupyCIDR") if node == nil { return nil } if !r.insertNodeToProcessing(node.Name) { - klog.V(2).Infof("Node %v is already in a process of CIDR assignment.", node.Name) + logger.V(2).Info("Node is already in a process of CIDR assignment.", "nodeName", node.Name) return nil } @@ -277,13 +281,14 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { } // queue the assignment - klog.V(4).Infof("Putting node %s with CIDR %v into the work queue", node.Name, allocated.allocatedCIDRs) + logger.V(4).Info("Putting node with CIDR into the work queue", "nodeName", node.Name, "allocatedCIDRs", allocated.allocatedCIDRs) r.nodeCIDRUpdateChannel <- allocated return nil } // ReleaseCIDR marks node.podCIDRs[...] as unused in our tracked cidrSets func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error { + logger := log.Background().WithName("ReleaseCIDR") if node == nil || len(node.Spec.PodCIDRs) == 0 { return nil } @@ -301,7 +306,7 @@ func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error { return fmt.Errorf("node:%s has an allocated cidr: %v at index:%v that does not exist in cluster cidrs configuration", node.Name, cidr, idx) } - klog.V(4).Infof("release CIDR %s for node:%v", cidr, node.Name) + logger.V(4).Info("release CIDR for node", "cidr", cidr, "nodeName", node.Name) if err = r.cidrSets[idx].Release(podCIDR); err != nil { return fmt.Errorf("error when releasing CIDR %v: %w", cidr, err) } @@ -350,6 +355,7 @@ func (r *rangeAllocator) allocatePodCIDRs() ([]*net.IPNet, error) { // updateCIDRsAllocation assigns CIDR to Node and sends an update to the API server. func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) (dataToRetry nodeReservedCIDRs, err error) { + logger := log.Background().WithName("updateCIDRsAllocation") var node *v1.Node defer func() { @@ -392,7 +398,7 @@ func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) (dataToRe } } if match { - klog.V(4).Infof("Node %v already has allocated CIDR %v. 
It matches the proposed one.", node.Name, data.allocatedCIDRs) + logger.V(4).Info("Node already has allocated CIDR. It matches the proposed one.", "nodeName", node.Name, "allocatedCIDRs", data.allocatedCIDRs) return data, nil } } diff --git a/pkg/nodemanager/nodemanager.go b/pkg/nodemanager/nodemanager.go index 79ce96b363..4940b57a62 100644 --- a/pkg/nodemanager/nodemanager.go +++ b/pkg/nodemanager/nodemanager.go @@ -145,15 +145,15 @@ func NewCloudNodeController( nodeProvider NodeProvider, nodeStatusUpdateFrequency time.Duration, waitForRoutes, enableBetaTopologyLabels bool) *CloudNodeController { - + logger := log.Background().WithName("NewCloudNodeController") eventBroadcaster := record.NewBroadcaster() recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}) eventBroadcaster.StartLogging(klog.Infof) if kubeClient != nil { - klog.V(0).Infof("Sending events to api server.") + logger.V(0).Info("Sending events to api server.") eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")}) } else { - klog.V(0).Infof("No api server defined - no events will be sent to API server.") + logger.V(0).Info("No api server defined - no events will be sent to API server.") } cnc := &CloudNodeController{ @@ -280,9 +280,10 @@ func (cnc *CloudNodeController) handleNodeEventWrapper(ctx context.Context, node // handleNodeEvent processes both add and update events for nodes that need cloud initialization func (cnc *CloudNodeController) handleNodeEvent(ctx context.Context, node *v1.Node) error { + logger := log.Background().WithName("handleNodeEvent") cloudTaint := GetCloudTaint(node.Spec.Taints) if cloudTaint == nil { - klog.V(2).Infof("Node %s has no cloud taint, skipping initialization", node.Name) + logger.V(2).Info("Node has no cloud taint, skipping initialization", "nodeName", node.Name) return nil } @@ -331,10 +332,11 @@ func (cnc *CloudNodeController) reconcileNodeLabels(node *v1.Node) error { // UpdateNodeAddress updates the nodeAddress of a single node func (cnc *CloudNodeController) updateNodeAddress(ctx context.Context, node *v1.Node) error { + logger := log.Background().WithName("updateNodeAddress") // Do not process nodes that are still tainted cloudTaint := GetCloudTaint(node.Spec.Taints) if cloudTaint != nil { - klog.V(5).Infof("This node %s is still tainted. Will not process.", node.Name) + logger.V(5).Info("This node is still tainted. Will not process.", "nodeName", node.Name) return nil } @@ -344,7 +346,7 @@ func (cnc *CloudNodeController) updateNodeAddress(ctx context.Context, node *v1. // Continue to update node address when not sure the node is not exists klog.Warningf("ensureNodeExistsByProviderID (node %s) reported an error (%v), continue to update its address", node.Name, err) } else if !exists { - klog.V(4).Infof("The node %s is no longer present according to the cloud provider, do not process.", node.Name) + logger.V(4).Info("The node is no longer present according to the cloud provider, do not process.", "nodeName", node.Name) return nil } @@ -354,7 +356,7 @@ func (cnc *CloudNodeController) updateNodeAddress(ctx context.Context, node *v1. 
} if len(nodeAddresses) == 0 { - klog.V(5).Infof("Skipping node address update for node %q since cloud provider did not return any", node.Name) + logger.V(5).Info("Skipping node address update since cloud provider did not return any", "nodeName", node.Name) return nil } @@ -401,7 +403,7 @@ type nodeModifier func(*v1.Node) // This processes nodes that were added into the cluster, and cloud initialize them if appropriate func (cnc *CloudNodeController) initializeNode(ctx context.Context, node *v1.Node) error { logger := log.Background().WithName("initializeNode") - klog.Infof("Initializing node %s with cloud provider", node.Name) + logger.Info("Initializing node with cloud provider", "node", node.Name) curNode, err := cnc.kubeClient.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{}) if err != nil { return fmt.Errorf("failed to get node: %w", err) @@ -550,7 +552,8 @@ func (cnc *CloudNodeController) getNodeModifiersFromCloudProvider(ctx context.Co // addCloudNodeLabel creates a nodeModifier that adds a label to a node. func addCloudNodeLabel(key, value string) func(*v1.Node) { - klog.V(2).Infof("Adding node label from cloud provider: %s=%s", key, value) + logger := log.Background().WithName("addCloudNodeLabel") + logger.V(2).Info("Adding node label from cloud provider", "key", key, "value", value) return func(node *v1.Node) { if node.Labels == nil { node.Labels = map[string]string{} @@ -701,19 +704,19 @@ func (cnc *CloudNodeController) getInterconnectGroupID(ctx context.Context) (str } func (cnc *CloudNodeController) updateNetworkingCondition(node *v1.Node, networkReady bool) error { + logger := log.Background().WithName("updateNetworkingCondition") _, condition := nodeutil.GetNodeCondition(&(node.Status), v1.NodeNetworkUnavailable) if networkReady && condition != nil && condition.Status == v1.ConditionFalse { - klog.V(4).Infof("set node %v with NodeNetworkUnavailable=false was canceled because it is already set", node.Name) + logger.V(4).Info("set node with NodeNetworkUnavailable=false was canceled because it is already set", "nodeName", node.Name) return nil } if !networkReady && condition != nil && condition.Status == v1.ConditionTrue { - klog.V(4).Infof("set node %v with NodeNetworkUnavailable=true was canceled because it is already set", node.Name) + logger.V(4).Info("set node with NodeNetworkUnavailable=true was canceled because it is already set", "nodeName", node.Name) return nil } - klog.V(2).Infof("Patching node status %v with %v previous condition was:%+v", node.Name, networkReady, condition) - + logger.V(2).Info("Patching node status", "nodeName", node.Name, "networkReady", networkReady, "previousCondition", condition) // either condition is not there, or has a value != to what we need // start setting it err := clientretry.RetryOnConflict(updateNetworkConditionBackoff, func() error { @@ -739,7 +742,7 @@ func (cnc *CloudNodeController) updateNetworkingCondition(node *v1.Node, network }) } if err != nil { - klog.V(4).Infof("Error updating node %s, retrying: %v", types.NodeName(node.Name), err) + logger.V(4).Info("Error updating node, retrying", "nodeName", types.NodeName(node.Name), "error", err) } return err }) diff --git a/pkg/provider/azure.go b/pkg/provider/azure.go index ae2b2d5e8c..e801af7c27 100644 --- a/pkg/provider/azure.go +++ b/pkg/provider/azure.go @@ -399,7 +399,7 @@ func (az *Cloud) InitializeCloudFromConfig(ctx context.Context, config *azurecon return fmt.Errorf("useInstanceMetadata must be enabled without Azure credentials") } - klog.V(2).Infof("Azure cloud 
provider is starting without credentials") + logger.V(2).Info("Azure cloud provider is starting without credentials") } if az.UserAgent == "" { @@ -587,6 +587,7 @@ func (az *Cloud) setLBDefaults(config *azureconfig.Config) error { } func (az *Cloud) setCloudProviderBackoffDefaults(config *azureconfig.Config) wait.Backoff { + logger := log.Background().WithName("setCloudProviderBackoffDefaults") // Conditionally configure resource request backoff resourceRequestBackoff := wait.Backoff{ Steps: 1, @@ -613,11 +614,11 @@ func (az *Cloud) setCloudProviderBackoffDefaults(config *azureconfig.Config) wai Duration: time.Duration(config.CloudProviderBackoffDuration) * time.Second, Jitter: config.CloudProviderBackoffJitter, } - klog.V(2).Infof("Azure cloudprovider using try backoff: retries=%d, exponent=%f, duration=%d, jitter=%f", - config.CloudProviderBackoffRetries, - config.CloudProviderBackoffExponent, - config.CloudProviderBackoffDuration, - config.CloudProviderBackoffJitter) + logger.V(2).Info("Azure cloudprovider using try backoff", + "retries", config.CloudProviderBackoffRetries, + "exponent", config.CloudProviderBackoffExponent, + "duration", config.CloudProviderBackoffDuration, + "jitter", config.CloudProviderBackoffJitter) } else { // CloudProviderBackoffRetries will be set to 1 by default as the requirements of Azure SDK. config.CloudProviderBackoffRetries = 1 @@ -719,7 +720,7 @@ func (az *Cloud) SetInformers(informerFactory informers.SharedInformerFactory) { } az.updateNodeCaches(node, nil) - klog.V(4).Infof("Removing node %s from VMSet cache.", node.Name) + logger.V(4).Info("Removing node from VMSet cache", "node", node.Name) _ = az.VMSet.DeleteCacheForNode(context.Background(), node.Name) }, }) @@ -768,7 +769,7 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { // Remove from nodePrivateIPs cache. 
for _, address := range getNodePrivateIPAddresses(prevNode) { - klog.V(6).Infof("removing IP address %s of the node %s", address, prevNode.Name) + logger.V(6).Info("removing IP address of the node", "address", address, "node", prevNode.Name) az.nodePrivateIPs[prevNode.Name].Delete(address) delete(az.nodePrivateIPToNodeNameMap, address) } @@ -810,11 +811,11 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { switch { case !isNodeManagedByCloudProvider: az.excludeLoadBalancerNodes.Insert(newNode.Name) - klog.V(6).Infof("excluding Node %q from LoadBalancer because it is not managed by cloud provider", newNode.Name) + logger.V(6).Info("excluding Node from LoadBalancer because it is not managed by cloud provider", "node", newNode.Name) case hasExcludeBalancerLabel: az.excludeLoadBalancerNodes.Insert(newNode.Name) - klog.V(6).Infof("excluding Node %q from LoadBalancer because it has exclude-from-external-load-balancers label", newNode.Name) + logger.V(6).Info("excluding Node from LoadBalancer because it has exclude-from-external-load-balancers label", "node", newNode.Name) default: // Nodes not falling into the three cases above are valid backends and @@ -828,7 +829,7 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { az.nodePrivateIPToNodeNameMap = make(map[string]string) } - klog.V(6).Infof("adding IP address %s of the node %s", address, newNode.Name) + logger.V(6).Info("adding IP address of the node", "address", address, "node", newNode.Name) az.nodePrivateIPs[strings.ToLower(newNode.Name)] = utilsets.SafeInsert(az.nodePrivateIPs[strings.ToLower(newNode.Name)], address) az.nodePrivateIPToNodeNameMap[address] = newNode.Name } @@ -837,6 +838,7 @@ func (az *Cloud) updateNodeCaches(prevNode, newNode *v1.Node) { // updateNodeTaint updates node out-of-service taint func (az *Cloud) updateNodeTaint(node *v1.Node) { + logger := log.Background().WithName("updateNodeTaint") if node == nil { klog.Warningf("node is nil, skip updating node out-of-service taint (should not happen)") return @@ -854,12 +856,12 @@ func (az *Cloud) updateNodeTaint(node *v1.Node) { // node shutdown taint is added when cloud provider determines instance is shutdown if !taints.TaintExists(node.Spec.Taints, nodeOutOfServiceTaint) && taints.TaintExists(node.Spec.Taints, nodeShutdownTaint) { - klog.V(2).Infof("adding %s taint to node %s", v1.TaintNodeOutOfService, node.Name) + logger.V(2).Info("adding taint to node", "taint", v1.TaintNodeOutOfService, "node", node.Name) if err := cloudnodeutil.AddOrUpdateTaintOnNode(az.KubeClient, node.Name, nodeOutOfServiceTaint); err != nil { klog.Errorf("failed to add taint %s to the node %s", v1.TaintNodeOutOfService, node.Name) } } else { - klog.V(2).Infof("node %s is not ready but either shutdown taint is missing or out-of-service taint is already added, skip adding node out-of-service taint", node.Name) + logger.V(2).Info("node is not ready but either shutdown taint is missing or out-of-service taint is already added, skip adding node out-of-service taint", "node", node.Name) } } } diff --git a/pkg/provider/azure_controller_standard.go b/pkg/provider/azure_controller_standard.go index f4a69d3744..b77d95fc6a 100644 --- a/pkg/provider/azure_controller_standard.go +++ b/pkg/provider/azure_controller_standard.go @@ -30,11 +30,13 @@ import ( "k8s.io/utils/ptr" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/util/errutils" ) // AttachDisk attaches a disk to vm func (as 
*availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error { + logger := log.Background().WithName("AttachDisk") vm, err := as.getVirtualMachine(ctx, nodeName, azcache.CacheReadTypeDefault) if err != nil { return err @@ -63,7 +65,7 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa } } if attached { - klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.Lun) + logger.V(2).Info("azureDisk - disk already attached to node on LUN", "diskURI", diskURI, "nodeName", nodeName, "LUN", opt.Lun) continue } @@ -99,7 +101,7 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa }, Location: vm.Location, } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%v)", nodeResourceGroup, vmName, diskMap) + logger.V(2).Info("azureDisk - update: vm - attach disk list", "resourceGroup", nodeResourceGroup, "vmName", vmName, "diskMap", diskMap) result, rerr := as.ComputeClientFactory.GetVirtualMachineClient().CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) if rerr != nil { @@ -112,7 +114,7 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa } } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%v) returned with %v", nodeResourceGroup, vmName, diskMap, err) + logger.V(2).Info("azureDisk - update: vm - attach disk list returned", "resourceGroup", nodeResourceGroup, "vmName", vmName, "diskMap", diskMap, "error", err) if rerr == nil && result != nil { as.updateCache(vmName, result) @@ -123,9 +125,10 @@ func (as *availabilitySet) AttachDisk(ctx context.Context, nodeName types.NodeNa } func (as *availabilitySet) DeleteCacheForNode(_ context.Context, nodeName string) error { + logger := log.Background().WithName("DeleteCacheForNode") err := as.vmCache.Delete(nodeName) if err == nil { - klog.V(2).Infof("DeleteCacheForNode(%s) successfully", nodeName) + logger.V(2).Info("DeleteCacheForNode successfully", "nodeName", nodeName) } else { klog.Errorf("DeleteCacheForNode(%s) failed with %v", nodeName, err) } @@ -134,6 +137,7 @@ func (as *availabilitySet) DeleteCacheForNode(_ context.Context, nodeName string // DetachDisk detaches a disk from VM func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { + logger := log.Background().WithName("DetachDisk") vm, err := as.getVirtualMachine(ctx, nodeName, azcache.CacheReadTypeDefault) if err != nil { // if host doesn't exist, no need to detach @@ -157,7 +161,7 @@ func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeNa (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) || (disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) { // found the disk - klog.V(2).Infof("azureDisk - detach disk: name %s uri %s", diskName, diskURI) + logger.V(2).Info("azureDisk - detach disk", "diskName", diskName, "diskURI", diskURI) disks[i].ToBeDetached = ptr.To(true) if forceDetach { disks[i].DetachOption = to.Ptr(armcompute.DiskDetachOptionTypesForceDetach) @@ -191,7 +195,7 @@ func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeNa }, Location: vm.Location, } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) node(%s)- detach disk list(%s)", nodeResourceGroup, vmName, nodeName, diskMap) + logger.V(2).Info("azureDisk - update: vm node - detach disk 
list", "resourceGroup", nodeResourceGroup, "vmName", vmName, "nodeName", nodeName, "diskMap", diskMap) result, err := as.ComputeClientFactory.GetVirtualMachineClient().CreateOrUpdate(ctx, nodeResourceGroup, vmName, newVM) if err != nil { @@ -205,7 +209,7 @@ func (as *availabilitySet) DetachDisk(ctx context.Context, nodeName types.NodeNa } } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s) returned with %v", nodeResourceGroup, vmName, diskMap, err) + logger.V(2).Info("azureDisk - update: vm - detach disk list returned with error", "resourceGroup", nodeResourceGroup, "vmName", vmName, "diskMap", diskMap, "error", err) if err == nil && result != nil { as.updateCache(vmName, result) @@ -240,6 +244,7 @@ func (as *availabilitySet) UpdateVM(ctx context.Context, nodeName types.NodeName } func (as *availabilitySet) updateCache(nodeName string, vm *armcompute.VirtualMachine) { + logger := log.Background().WithName("updateCache") if nodeName == "" { klog.Errorf("updateCache(%s) failed with empty nodeName", nodeName) return @@ -249,7 +254,7 @@ func (as *availabilitySet) updateCache(nodeName string, vm *armcompute.VirtualMa return } as.vmCache.Update(nodeName, vm) - klog.V(2).Infof("updateCache(%s) successfully", nodeName) + logger.V(2).Info("updateCache successfully", "nodeName", nodeName) } // GetDataDisks gets a list of data disks attached to the node. diff --git a/pkg/provider/azure_controller_vmss.go b/pkg/provider/azure_controller_vmss.go index d5b12f2ea7..8e96493844 100644 --- a/pkg/provider/azure_controller_vmss.go +++ b/pkg/provider/azure_controller_vmss.go @@ -30,11 +30,13 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/util/errutils" ) // AttachDisk attaches a disk to vm func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error { + logger := log.Background().WithName("AttachDisk") vmName := mapNodeNameToVMName(nodeName) vm, err := ss.getVmssVM(ctx, vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -69,7 +71,7 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis } } if attached { - klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.Lun) + logger.V(2).Info("azureDisk - disk already attached to node on LUN", "diskURI", diskURI, "nodeName", nodeName, "LUN", opt.Lun) continue } @@ -105,7 +107,7 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis }, } - klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v)", nodeResourceGroup, nodeName, diskMap) + logger.V(2).Info("azureDisk - update: rg vm - attach disk list", "resourceGroup", nodeResourceGroup, "nodeName", nodeName, "diskMap", diskMap) result, rerr := ss.ComputeClientFactory.GetVirtualMachineScaleSetVMClient().Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, *newVM) if rerr != nil { klog.Errorf("azureDisk - attach disk list(%+v) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr) @@ -117,7 +119,7 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis } } - klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v) returned with %v", nodeResourceGroup, nodeName, diskMap, rerr) + logger.V(2).Info("azureDisk - update: rg vm - attach disk list returned with error", "resourceGroup", 
nodeResourceGroup, "nodeName", nodeName, "diskMap", diskMap, "error", rerr) if rerr == nil && result != nil && result.Properties != nil { if err := ss.updateCache(ctx, vmName, nodeResourceGroup, vm.VMSSName, vm.InstanceID, result); err != nil { @@ -131,6 +133,7 @@ func (ss *ScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, dis // DetachDisk detaches a disk from VM func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { + logger := log.Background().WithName("DetachDisk") vmName := mapNodeNameToVMName(nodeName) vm, err := ss.getVmssVM(ctx, vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -158,7 +161,7 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) || (disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) { // found the disk - klog.V(2).Infof("azureDisk - detach disk: name %s uri %s", diskName, diskURI) + logger.V(2).Info("azureDisk - detach disk", "diskName", diskName, "diskURI", diskURI) disks[i].ToBeDetached = ptr.To(true) if forceDetach { disks[i].DetachOption = to.Ptr(armcompute.DiskDetachOptionTypesForceDetach) @@ -192,7 +195,7 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis }, } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s)", nodeResourceGroup, nodeName, diskMap) + logger.V(2).Info("azureDisk - update: vm - detach disk list", "resourceGroup", nodeResourceGroup, "nodeName", nodeName, "diskMap", diskMap) result, rerr := ss.ComputeClientFactory.GetVirtualMachineScaleSetVMClient().Update(ctx, nodeResourceGroup, vm.VMSSName, vm.InstanceID, *newVM) if rerr != nil { klog.Errorf("azureDisk - detach disk list(%+v) on rg(%s) vm(%s) failed, err: %v", diskMap, nodeResourceGroup, nodeName, rerr) @@ -204,7 +207,7 @@ func (ss *ScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, dis } } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%v) returned with %v", nodeResourceGroup, nodeName, diskMap, err) + logger.V(2).Info("azureDisk - update: vm - detach disk returned with error", "resourceGroup", nodeResourceGroup, "nodeName", nodeName, "diskMap", diskMap, "error", err) if rerr == nil && result != nil && result.Properties != nil { if err := ss.updateCache(ctx, vmName, nodeResourceGroup, vm.VMSSName, vm.InstanceID, result); err != nil { diff --git a/pkg/provider/azure_controller_vmssflex.go b/pkg/provider/azure_controller_vmssflex.go index 553d776953..5ba6684767 100644 --- a/pkg/provider/azure_controller_vmssflex.go +++ b/pkg/provider/azure_controller_vmssflex.go @@ -34,10 +34,12 @@ import ( "k8s.io/utils/ptr" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) // AttachDisk attaches a disk to vm func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]*AttachDiskOptions) error { + logger := log.Background().WithName("AttachDisk") vmName := mapNodeNameToVMName(nodeName) vm, err := fs.getVmssFlexVM(ctx, vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -66,7 +68,7 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, } } if attached { - klog.V(2).Infof("azureDisk - disk(%s) already attached to node(%s) on LUN(%d)", diskURI, nodeName, opt.Lun) + logger.V(2).Info("azureDisk - disk already attached to node on LUN", 
"diskURI", diskURI, "nodeName", nodeName, "LUN", opt.Lun) continue } @@ -103,7 +105,7 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, Location: vm.Location, } - klog.V(2).Infof("azureDisk - update: rg(%s) vm(%s) - attach disk list(%+v)", nodeResourceGroup, vmName, diskMap) + logger.V(2).Info("azureDisk - update: rg vm - attach disk list", "resourceGroup", nodeResourceGroup, "vmName", vmName, "diskMap", diskMap) result, err := fs.ComputeClientFactory.GetVirtualMachineClient().CreateOrUpdate(ctx, nodeResourceGroup, *vm.Name, newVM) var rerr *azcore.ResponseError if err != nil && errors.As(err, &rerr) { @@ -116,7 +118,7 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, } } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk list(%+v) returned with %v", nodeResourceGroup, vmName, diskMap, rerr) + logger.V(2).Info("azureDisk - update: vm - attach disk list returned with error", "resourceGroup", nodeResourceGroup, "vmName", vmName, "diskMap", diskMap, "error", rerr) if err == nil && result != nil { if rerr := fs.updateCache(ctx, vmName, result); rerr != nil { @@ -130,6 +132,7 @@ func (fs *FlexScaleSet) AttachDisk(ctx context.Context, nodeName types.NodeName, // DetachDisk detaches a disk from VM func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, diskMap map[string]string, forceDetach bool) error { + logger := log.Background().WithName("DetachDisk") vmName := mapNodeNameToVMName(nodeName) vm, err := fs.getVmssFlexVM(ctx, vmName, azcache.CacheReadTypeDefault) if err != nil { @@ -153,7 +156,7 @@ func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, (disk.Vhd != nil && disk.Vhd.URI != nil && diskURI != "" && strings.EqualFold(*disk.Vhd.URI, diskURI)) || (disk.ManagedDisk != nil && diskURI != "" && strings.EqualFold(*disk.ManagedDisk.ID, diskURI)) { // found the disk - klog.V(2).Infof("azureDisk - detach disk: name %s uri %s", diskName, diskURI) + logger.V(2).Info("azureDisk - detach disk", "diskName", diskName, "diskURI", diskURI) disks[i].ToBeDetached = ptr.To(true) if forceDetach { disks[i].DetachOption = to.Ptr(armcompute.DiskDetachOptionTypesForceDetach) @@ -188,7 +191,7 @@ func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, Location: vm.Location, } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) node(%s)- detach disk list(%s)", nodeResourceGroup, vmName, nodeName, diskMap) + logger.V(2).Info("azureDisk - update: vm node - detach disk list", "resourceGroup", nodeResourceGroup, "vmName", vmName, "nodeName", nodeName, "diskMap", diskMap) result, err := fs.ComputeClientFactory.GetVirtualMachineClient().CreateOrUpdate(ctx, nodeResourceGroup, *vm.Name, newVM) if err != nil { @@ -204,7 +207,7 @@ func (fs *FlexScaleSet) DetachDisk(ctx context.Context, nodeName types.NodeName, } } - klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk list(%s) returned with %v", nodeResourceGroup, vmName, diskMap, err) + logger.V(2).Info("azureDisk - update: vm - detach disk list returned with error", "resourceGroup", nodeResourceGroup, "vmName", vmName, "diskMap", diskMap, "error", err) if err == nil && result != nil { if rerr := fs.updateCache(ctx, vmName, result); rerr != nil { @@ -235,6 +238,7 @@ func (fs *FlexScaleSet) UpdateVM(ctx context.Context, nodeName types.NodeName) e } func (fs *FlexScaleSet) updateCache(ctx context.Context, nodeName string, vm *armcompute.VirtualMachine) error { + logger := log.Background().WithName("updateCache") 
if nodeName == "" { return fmt.Errorf("nodeName is empty") } @@ -267,7 +271,7 @@ func (fs *FlexScaleSet) updateCache(ctx context.Context, nodeName string, vm *ar fs.vmssFlexVMNameToVmssID.Store(strings.ToLower(*vm.Properties.OSProfile.ComputerName), vmssFlexID) fs.vmssFlexVMNameToNodeName.Store(*vm.Name, strings.ToLower(*vm.Properties.OSProfile.ComputerName)) - klog.V(2).Infof("updateCache(%s) for vmssFlexID(%s) successfully", nodeName, vmssFlexID) + logger.V(2).Info("updateCache for vmssFlexID successfully", "nodeName", nodeName, "vmssFlexID", vmssFlexID) return nil } diff --git a/pkg/provider/azure_instance_metadata.go b/pkg/provider/azure_instance_metadata.go index 072afb47e9..bb2c2ec054 100644 --- a/pkg/provider/azure_instance_metadata.go +++ b/pkg/provider/azure_instance_metadata.go @@ -30,6 +30,7 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) // NetworkMetadata contains metadata about an instance's network @@ -151,6 +152,7 @@ func fillNetInterfacePublicIPs(publicIPs []PublicIPMetadata, netInterface *Netwo } func (ims *InstanceMetadataService) getMetadata(_ context.Context, key string) (interface{}, error) { + logger := log.Background().WithName("getMetadata") instanceMetadata, err := ims.getInstanceMetadata(key) if err != nil { return nil, err @@ -168,7 +170,7 @@ func (ims *InstanceMetadataService) getMetadata(_ context.Context, key string) ( if err != nil || loadBalancerMetadata == nil || loadBalancerMetadata.LoadBalancer == nil { // Log a warning since loadbalancer metadata may not be available when the VM // is not in standard LoadBalancer backend address pool. - klog.V(4).Infof("Warning: failed to get loadbalancer metadata: %v", err) + logger.V(4).Info("Warning: failed to get loadbalancer metadata", "error", err) return instanceMetadata, nil } @@ -296,7 +298,8 @@ func (az *Cloud) GetPlatformSubFaultDomain(ctx context.Context) (string, error) // TODO: Implement actual IMDS parsing logic when format is finalized. // Currently returns empty string to allow infrastructure to be in place. 
func (az *Cloud) GetInterconnectGroupID(_ context.Context) (string, error) { + logger := log.Background().WithName("GetInterconnectGroupID") // Placeholder implementation - returns empty until IMDS format is determined - klog.V(4).Infof("GetInterconnectGroupID: placeholder implementation, returning empty") + logger.V(4).Info("placeholder implementation, returning empty") return "", nil } diff --git a/pkg/provider/azure_instances_v1.go b/pkg/provider/azure_instances_v1.go index fa883bc644..b571e634ff 100644 --- a/pkg/provider/azure_instances_v1.go +++ b/pkg/provider/azure_instances_v1.go @@ -26,10 +26,10 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" - "k8s.io/klog/v2" azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) var _ cloudprovider.Instances = (*Cloud)(nil) @@ -45,9 +45,10 @@ var ( ) func (az *Cloud) addressGetter(ctx context.Context, nodeName types.NodeName) ([]v1.NodeAddress, error) { + logger := log.Background().WithName("addressGetter") ip, publicIP, err := az.getIPForMachine(ctx, nodeName) if err != nil { - klog.V(2).Infof("NodeAddresses(%s) abort backoff: %v", nodeName, err) + logger.V(2).Info("NodeAddresses abort backoff", "nodeName", nodeName, "error", err) return nil, err } @@ -66,13 +67,14 @@ func (az *Cloud) addressGetter(ctx context.Context, nodeName types.NodeName) ([] // NodeAddresses returns the addresses of the specified instance. func (az *Cloud) NodeAddresses(ctx context.Context, name types.NodeName) ([]v1.NodeAddress, error) { + logger := log.Background().WithName("NodeAddresses") // Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them. unmanaged, err := az.IsNodeUnmanaged(string(name)) if err != nil { return nil, err } if unmanaged { - klog.V(4).Infof("NodeAddresses: omitting unmanaged node %q", name) + logger.V(4).Info("omitting unmanaged node", "nodeName", name) return nil, nil } @@ -156,13 +158,14 @@ func (az *Cloud) getLocalInstanceNodeAddresses(netInterfaces []*NetworkInterface // This method will not be called from the node that is requesting this ID. i.e. metadata service // and other local methods cannot be used here func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID string) ([]v1.NodeAddress, error) { + logger := log.Background().WithName("NodeAddressesByProviderID") if providerID == "" { return nil, errNodeNotInitialized } // Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them. if az.IsNodeUnmanagedByProviderID(providerID) { - klog.V(4).Infof("NodeAddressesByProviderID: omitting unmanaged node %q", providerID) + logger.V(4).Info("omitting unmanaged node", "providerID", providerID) return nil, nil } @@ -182,13 +185,14 @@ func (az *Cloud) NodeAddressesByProviderID(ctx context.Context, providerID strin // InstanceExistsByProviderID returns true if the instance with the given provider id still exists and is running. // If false is returned with no error, the instance will be immediately deleted by the cloud controller manager. func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID string) (bool, error) { + logger := log.Background().WithName("InstanceExistsByProviderID") if providerID == "" { return false, errNodeNotInitialized } // Returns true for unmanaged nodes because azure cloud provider always assumes them exists. 
if az.IsNodeUnmanagedByProviderID(providerID) { - klog.V(4).Infof("InstanceExistsByProviderID: assuming unmanaged node %q exists", providerID) + logger.V(4).Info("assuming unmanaged node exists", "providerID", providerID) return true, nil } @@ -218,6 +222,7 @@ func (az *Cloud) InstanceExistsByProviderID(ctx context.Context, providerID stri // InstanceShutdownByProviderID returns true if the instance is in safe state to detach volumes func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) { + logger := log.Background().WithName("InstanceShutdownByProviderID") if providerID == "" { return false, nil } @@ -245,7 +250,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st return false, err } - klog.V(3).Infof("InstanceShutdownByProviderID gets power status %q for node %q", powerStatus, nodeName) + logger.V(3).Info("got power status for node", "powerStatus", powerStatus, "nodeName", nodeName) provisioningState, err := az.VMSet.GetProvisioningStateByNodeName(ctx, string(nodeName)) if err != nil { @@ -256,7 +261,7 @@ func (az *Cloud) InstanceShutdownByProviderID(ctx context.Context, providerID st return false, err } - klog.V(3).Infof("InstanceShutdownByProviderID gets provisioning state %q for node %q", provisioningState, nodeName) + logger.V(3).Info("got provisioning state for node", "provisioningState", provisioningState, "nodeName", nodeName) status := strings.ToLower(powerStatus) provisioningSucceeded := strings.EqualFold(strings.ToLower(provisioningState), strings.ToLower(string(consts.ProvisioningStateSucceeded))) @@ -288,6 +293,7 @@ func (az *Cloud) isCurrentInstance(name types.NodeName, metadataVMName string) ( // InstanceID returns the cloud provider ID of the specified instance. // Note that if the instance does not exist or is no longer running, we must return ("", cloudprovider.InstanceNotFound) func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, error) { + logger := log.Background().WithName("InstanceID") nodeName := mapNodeNameToVMName(name) unmanaged, err := az.IsNodeUnmanaged(nodeName) if err != nil { @@ -295,7 +301,7 @@ func (az *Cloud) InstanceID(ctx context.Context, name types.NodeName) (string, e } if unmanaged { // InstanceID is same with nodeName for unmanaged nodes. - klog.V(4).Infof("InstanceID: getting ID %q for unmanaged node %q", name, name) + logger.V(4).Info("getting ID for unmanaged node", "nodeName", name) return nodeName, nil } @@ -350,13 +356,14 @@ func (az *Cloud) getLocalInstanceProviderID(metadata *InstanceMetadata, _ string // This method will not be called from the node that is requesting this ID. i.e. metadata service // and other local methods cannot be used here func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string) (string, error) { + logger := log.Background().WithName("InstanceTypeByProviderID") if providerID == "" { return "", errNodeNotInitialized } // Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them. if az.IsNodeUnmanagedByProviderID(providerID) { - klog.V(4).Infof("InstanceTypeByProviderID: omitting unmanaged node %q", providerID) + logger.V(4).Info("omitting unmanaged node", "providerID", providerID) return "", nil } @@ -378,13 +385,14 @@ func (az *Cloud) InstanceTypeByProviderID(ctx context.Context, providerID string // (Implementer Note): This is used by kubelet. Kubelet will label the node.
Real log from kubelet: // Adding node label from cloud provider: beta.kubernetes.io/instance-type=[value] func (az *Cloud) InstanceType(ctx context.Context, name types.NodeName) (string, error) { + logger := log.Background().WithName("InstanceType") // Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them. unmanaged, err := az.IsNodeUnmanaged(string(name)) if err != nil { return "", err } if unmanaged { - klog.V(4).Infof("InstanceType: omitting unmanaged node %q", name) + logger.V(4).Info("omitting unmanaged node", "nodeName", name) return "", nil } diff --git a/pkg/provider/azure_instances_v2.go b/pkg/provider/azure_instances_v2.go index 38b08663b4..4d9032bbc1 100644 --- a/pkg/provider/azure_instances_v2.go +++ b/pkg/provider/azure_instances_v2.go @@ -24,6 +24,8 @@ import ( "k8s.io/apimachinery/pkg/types" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" + + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) var _ cloudprovider.InstancesV2 = (*Cloud)(nil) @@ -31,6 +33,7 @@ var _ cloudprovider.InstancesV2 = (*Cloud)(nil) // InstanceExists returns true if the instance for the given node exists according to the cloud provider. // Use the node.name or node.spec.providerID field to find the node in the cloud provider. func (az *Cloud) InstanceExists(ctx context.Context, node *v1.Node) (bool, error) { + logger := log.Background().WithName("InstanceExists") if node == nil { return false, nil } @@ -39,7 +42,7 @@ func (az *Cloud) InstanceExists(ctx context.Context, node *v1.Node) (bool, error return false, err } if unmanaged { - klog.V(4).Infof("InstanceExists: omitting unmanaged node %q", node.Name) + logger.V(4).Info("omitting unmanaged node", "nodeName", node.Name) return true, nil } @@ -63,6 +66,7 @@ func (az *Cloud) InstanceExists(ctx context.Context, node *v1.Node) (bool, error // InstanceShutdown returns true if the instance is shutdown according to the cloud provider. // Use the node.name or node.spec.providerID field to find the node in the cloud provider. func (az *Cloud) InstanceShutdown(ctx context.Context, node *v1.Node) (bool, error) { + logger := log.Background().WithName("InstanceShutdown") if node == nil { return false, nil } @@ -71,7 +75,7 @@ func (az *Cloud) InstanceShutdown(ctx context.Context, node *v1.Node) (bool, err return false, err } if unmanaged { - klog.V(4).Infof("InstanceShutdown: omitting unmanaged node %q", node.Name) + logger.V(4).Info("omitting unmanaged node", "nodeName", node.Name) return false, nil } providerID := node.Spec.ProviderID @@ -96,6 +100,7 @@ func (az *Cloud) InstanceShutdown(ctx context.Context, node *v1.Node) (bool, err // translated into specific fields in the Node object on registration. // Use the node.name or node.spec.providerID field to find the node in the cloud provider. 
func (az *Cloud) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloudprovider.InstanceMetadata, error) { + logger := log.Background().WithName("InstanceMetadata") meta := cloudprovider.InstanceMetadata{} if node == nil { return &meta, nil @@ -105,7 +110,7 @@ func (az *Cloud) InstanceMetadata(ctx context.Context, node *v1.Node) (*cloudpro return &meta, err } if unmanaged { - klog.V(4).Infof("InstanceMetadata: omitting unmanaged node %q", node.Name) + logger.V(4).Info("omitting unmanaged node", "nodeName", node.Name) return &meta, nil } diff --git a/pkg/provider/azure_interface_repo.go b/pkg/provider/azure_interface_repo.go index dd731392af..4e7cca51b2 100644 --- a/pkg/provider/azure_interface_repo.go +++ b/pkg/provider/azure_interface_repo.go @@ -22,12 +22,15 @@ import ( "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/network/armnetwork/v6" v1 "k8s.io/api/core/v1" "k8s.io/klog/v2" + + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) // CreateOrUpdateInterface invokes az.ComputeClientFactory.GetInterfaceClient().CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateInterface(ctx context.Context, service *v1.Service, nic *armnetwork.Interface) error { + logger := log.Background().WithName("CreateOrUpdateInterface") _, rerr := az.ComputeClientFactory.GetInterfaceClient().CreateOrUpdate(ctx, az.ResourceGroup, *nic.Name, *nic) - klog.V(10).Infof("InterfacesClient.CreateOrUpdate(%s): end", *nic.Name) + logger.V(10).Info("InterfacesClient.CreateOrUpdate: end", "nicName", *nic.Name) if rerr != nil { klog.Errorf("InterfacesClient.CreateOrUpdate(%s) failed: %s", *nic.Name, rerr.Error()) az.Event(service, v1.EventTypeWarning, "CreateOrUpdateInterface", rerr.Error()) diff --git a/pkg/provider/azure_loadbalancer.go b/pkg/provider/azure_loadbalancer.go index 247294c078..fc381e552b 100644 --- a/pkg/provider/azure_loadbalancer.go +++ b/pkg/provider/azure_loadbalancer.go @@ -532,12 +532,13 @@ func (az *Cloud) getLoadBalancerResourceGroup() string { // according to the mode annotation on the service. This could be happened when the LB selection mode of an // existing service is changed to another VMSS/VMAS. func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clusterName, expectedLBName string) bool { + logger := log.Background().WithName("shouldChangeLoadBalancer") // The load balancer can be changed in two cases: // 1. Using multiple standard load balancers. // 2. Migrate from multiple standard load balancers to single standard load balancer. 
if az.UseStandardLoadBalancer() { if !strings.EqualFold(currLBName, expectedLBName) { - klog.V(2).Infof("shouldChangeLoadBalancer(%s, %s, %s): change the LB to another one %s", service.Name, currLBName, clusterName, expectedLBName) + logger.V(2).Info("change the LB to another one", "service", service.Name, "currLBName", currLBName, "clusterName", clusterName, "expectedLBName", expectedLBName) return true } return false @@ -556,7 +557,7 @@ func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clust if strings.EqualFold(lbName, vmSetName) { if !strings.EqualFold(lbName, clusterName) && strings.EqualFold(az.VMSet.GetPrimaryVMSetName(), vmSetName) { - klog.V(2).Infof("shouldChangeLoadBalancer(%s, %s, %s): change the LB to another one", service.Name, currLBName, clusterName) + logger.V(2).Info("change the LB to another one", "service", service.Name, "currLBName", currLBName, "clusterName", clusterName) return true } return false @@ -567,7 +568,7 @@ func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clust // if the VMSS/VMAS of the current LB is different from the mode, change the LB // to another one - klog.V(2).Infof("shouldChangeLoadBalancer(%s, %s, %s): change the LB to another one", service.Name, currLBName, clusterName) + logger.V(2).Info("change the LB to another one", "service", service.Name, "currLBName", currLBName, "clusterName", clusterName) return true } @@ -575,6 +576,7 @@ func (az *Cloud) shouldChangeLoadBalancer(service *v1.Service, currLBName, clust // and delete the load balancer if there is no ip config on it. It returns the name of the deleted load balancer // and it will be used in reconcileLoadBalancer to remove the load balancer from the list. func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(ctx context.Context, lb *armnetwork.LoadBalancer, existingLBs []*armnetwork.LoadBalancer, fips []*armnetwork.FrontendIPConfiguration, clusterName string, service *v1.Service) (string, bool /* deleted PLS */, error) { + logger := log.Background().WithName("removeFrontendIPConfigurationFromLoadBalancer") if lb == nil || lb.Properties == nil || lb.Properties.FrontendIPConfigurations == nil { return "", false, nil } @@ -640,7 +642,7 @@ func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(ctx context.Conte } logPrefix := fmt.Sprintf("removeFrontendIPConfigurationFromLoadBalancer(%s, %q, %s, %s)", ptr.Deref(lb.Name, ""), fipNames, clusterName, service.Name) if len(fipConfigs) == 0 { - klog.V(2).Infof("%s: deleting load balancer because there is no remaining frontend IP configurations", logPrefix) + logger.V(2).Info("deleting load balancer because there is no remaining frontend IP configurations", "lbName", ptr.Deref(lb.Name, ""), "fipNames", fipNames, "clusterName", clusterName, "serviceName", service.Name) err := az.cleanOrphanedLoadBalancer(ctx, lb, existingLBs, service, clusterName) if err != nil { klog.Errorf("%s: failed to cleanupOrphanedLoadBalancer: %v", logPrefix, err) @@ -648,7 +650,7 @@ func (az *Cloud) removeFrontendIPConfigurationFromLoadBalancer(ctx context.Conte } deletedLBName = ptr.Deref(lb.Name, "") } else { - klog.V(2).Infof("%s: updating the load balancer", logPrefix) + logger.V(2).Info("updating the load balancer", "lbName", ptr.Deref(lb.Name, ""), "fipNames", fipNames, "clusterName", clusterName, "serviceName", service.Name) err := az.CreateOrUpdateLB(ctx, service, *lb) if err != nil { klog.Errorf("%s: failed to CreateOrUpdateLB: %v", logPrefix, err) @@ -660,6 +662,7 @@ func (az *Cloud) 
removeFrontendIPConfigurationFromLoadBalancer(ctx context.Conte } func (az *Cloud) cleanOrphanedLoadBalancer(ctx context.Context, lb *armnetwork.LoadBalancer, existingLBs []*armnetwork.LoadBalancer, service *v1.Service, clusterName string) error { + logger := log.Background().WithName("cleanOrphanedLoadBalancer") lbName := ptr.Deref(lb.Name, "") serviceName := getServiceName(service) isBackendPoolPreConfigured := az.isBackendPoolPreConfigured(service) @@ -673,7 +676,7 @@ func (az *Cloud) cleanOrphanedLoadBalancer(ctx context.Context, lb *armnetwork.L lbBackendPoolIDsToDelete = append(lbBackendPoolIDsToDelete, lbBackendPoolIDs[consts.IPVersionIPv6]) } if isBackendPoolPreConfigured { - klog.V(2).Infof("cleanOrphanedLoadBalancer(%s, %s, %s): ignore cleanup of dirty lb because the lb is pre-configured", lbName, serviceName, clusterName) + logger.V(2).Info("ignore cleanup of dirty lb because the lb is pre-configured", "lbName", lbName, "serviceName", serviceName, "clusterName", clusterName) } else { foundLB := false for _, existingLB := range existingLBs { @@ -683,13 +686,13 @@ func (az *Cloud) cleanOrphanedLoadBalancer(ctx context.Context, lb *armnetwork.L } } if !foundLB { - klog.V(2).Infof("cleanOrphanedLoadBalancer: the LB %s doesn't exist, will not delete it", ptr.Deref(lb.Name, "")) + logger.V(2).Info("cleanOrphanedLoadBalancer: the LB doesn't exist, will not delete it", "lbName", ptr.Deref(lb.Name, "")) return nil } // When FrontendIPConfigurations is empty, we need to delete the Azure load balancer resource itself, // because an Azure load balancer cannot have an empty FrontendIPConfigurations collection - klog.V(2).Infof("cleanOrphanedLoadBalancer(%s, %s, %s): deleting the LB since there are no remaining frontendIPConfigurations", lbName, serviceName, clusterName) + logger.V(2).Info("deleting the LB since there are no remaining frontendIPConfigurations", "lbName", lbName, "serviceName", serviceName, "clusterName", clusterName) // Remove backend pools from vmSets. This is required for virtual machine scale sets before removing the LB. if _, ok := az.VMSet.(*availabilitySet); ok { @@ -731,13 +734,14 @@ func (az *Cloud) cleanOrphanedLoadBalancer(ctx context.Context, lb *armnetwork.L return deleteErr } } - klog.V(10).Infof("cleanOrphanedLoadBalancer(%s, %s, %s): az.DeleteLB finished", lbName, serviceName, clusterName) + logger.V(10).Info("az.DeleteLB finished", "lbName", lbName, "serviceName", serviceName, "clusterName", clusterName) } return nil } // safeDeleteLoadBalancer deletes the load balancer after decoupling it from the vmSet func (az *Cloud) safeDeleteLoadBalancer(ctx context.Context, lb armnetwork.LoadBalancer, clusterName string, service *v1.Service) error { + logger := log.Background().WithName("safeDeleteLoadBalancer") vmSetName := az.mapLoadBalancerNameToVMSet(ptr.Deref(lb.Name, ""), clusterName) lbBackendPoolIDsToDelete := []string{} if lb.Properties != nil && lb.Properties.BackendAddressPools != nil { @@ -749,7 +753,7 @@ func (az *Cloud) safeDeleteLoadBalancer(ctx context.Context, lb armnetwork.LoadB return fmt.Errorf("safeDeleteLoadBalancer: failed to EnsureBackendPoolDeleted: %w", err) } - klog.V(2).Infof("safeDeleteLoadBalancer: deleting LB %s", ptr.Deref(lb.Name, "")) + logger.V(2).Info("deleting LB", "lbName", ptr.Deref(lb.Name, "")) if rerr := az.DeleteLB(ctx, service, ptr.Deref(lb.Name, "")); rerr != nil { return rerr } @@ -937,15 +941,16 @@ func (az *Cloud) getServiceLoadBalancer( // then selects the first one (sorted based on name). 
// Note: this function is only useful for basic LB clusters. func (az *Cloud) selectLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, existingLBs []*armnetwork.LoadBalancer, nodes []*v1.Node) (selectedLB *armnetwork.LoadBalancer, existsLb bool, err error) { + logger := log.Background().WithName("selectLoadBalancer") isInternal := requiresInternalLoadBalancer(service) serviceName := getServiceName(service) - klog.V(2).Infof("selectLoadBalancer for service (%s): isInternal(%v) - start", serviceName, isInternal) + logger.V(2).Info("start", "serviceName", serviceName, "isInternal", isInternal) vmSetNames, err := az.VMSet.GetVMSetNames(ctx, service, nodes) if err != nil { klog.Errorf("az.selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - az.GetVMSetNames failed, err=(%v)", clusterName, serviceName, isInternal, err) return nil, false, err } - klog.V(2).Infof("selectLoadBalancer: cluster(%s) service(%s) isInternal(%t) - vmSetNames %v", clusterName, serviceName, isInternal, vmSetNames) + logger.V(2).Info("got vmSetNames", "clusterName", clusterName, "serviceName", serviceName, "isInternal", isInternal, "vmSetNames", vmSetNames) mapExistingLBs := map[string]*armnetwork.LoadBalancer{} for _, lb := range existingLBs { @@ -1015,12 +1020,13 @@ func (az *Cloud) selectLoadBalancer(ctx context.Context, clusterName string, ser // and the second one as additional one. With DualStack support, the second IP may be // the IP of another IP family so the new logic returns two variables. func (az *Cloud) getServiceLoadBalancerStatus(ctx context.Context, service *v1.Service, lb *armnetwork.LoadBalancer) (status *v1.LoadBalancerStatus, lbIPsPrimaryPIPs []string, fipConfigs []*armnetwork.FrontendIPConfiguration, err error) { + logger := log.Background().WithName("getServiceLoadBalancerStatus") if lb == nil { - klog.V(10).Info("getServiceLoadBalancerStatus: lb is nil") + logger.V(10).Info("lb is nil") return nil, nil, nil, nil } if lb.Properties == nil || len(lb.Properties.FrontendIPConfigurations) == 0 { - klog.V(10).Info("getServiceLoadBalancerStatus: lb.Properties.FrontendIPConfigurations is nil") + logger.V(10).Info("lb.Properties.FrontendIPConfigurations is nil") return nil, nil, nil, nil } @@ -1031,7 +1037,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(ctx context.Context, service *v1.S ipConfiguration := lb.Properties.FrontendIPConfigurations[i] owns, isPrimaryService, _ := az.serviceOwnsFrontendIP(ctx, ipConfiguration, service) if owns { - klog.V(2).Infof("get(%s): lb(%s) - found frontend IP config, primary service: %v", serviceName, ptr.Deref(lb.Name, ""), isPrimaryService) + logger.V(2).Info("found frontend IP config", "serviceName", serviceName, "lbName", ptr.Deref(lb.Name, ""), "isPrimaryService", isPrimaryService) var lbIP *string if isInternal { @@ -1057,7 +1063,7 @@ func (az *Cloud) getServiceLoadBalancerStatus(ctx context.Context, service *v1.S } } - klog.V(2).Infof("getServiceLoadBalancerStatus gets ingress IP %q from frontendIPConfiguration %q for service %q", ptr.Deref(lbIP, ""), ptr.Deref(ipConfiguration.Name, ""), serviceName) + logger.V(2).Info("got ingress IP from frontendIPConfiguration for service", "ingressIP", ptr.Deref(lbIP, ""), "frontendIPConfiguration", ptr.Deref(ipConfiguration.Name, ""), "serviceName", serviceName) lbIngresses = append(lbIngresses, v1.LoadBalancerIngress{IP: ptr.Deref(lbIP, "")}) lbIPsPrimaryPIPs = append(lbIPsPrimaryPIPs, ptr.Deref(lbIP, "")) @@ -1143,6 +1149,7 @@ func updateServiceLoadBalancerIPs(service *v1.Service, serviceIPs []string) 
*v1. } func (az *Cloud) ensurePublicIPExists(ctx context.Context, service *v1.Service, pipName string, domainNameLabel, clusterName string, shouldPIPExisted, foundDNSLabelAnnotation, isIPv6 bool) (*armnetwork.PublicIPAddress, error) { + logger := log.Background().WithName("ensurePublicIPExists") pipResourceGroup := az.getPublicIPAddressResourceGroup(service) pip, existsPip, err := az.getPublicIPAddress(ctx, pipResourceGroup, pipName, azcache.CacheReadTypeDefault) if err != nil { @@ -1184,12 +1191,11 @@ func (az *Cloud) ensurePublicIPExists(ctx context.Context, service *v1.Service, // return if pip exist and dns label is the same if strings.EqualFold(getDomainNameLabel(pip), domainNameLabel) { if existingServiceName := getServiceFromPIPDNSTags(pip.Tags); existingServiceName != "" && strings.EqualFold(existingServiceName, serviceName) { - klog.V(6).Infof("ensurePublicIPExists for service(%s): pip(%s) - "+ - "the service is using the DNS label on the public IP", serviceName, pipName) + logger.V(6).Info("the service is using the DNS label on the public IP", "serviceName", serviceName, "pipName", pipName) var err error if changed { - klog.V(2).Infof("ensurePublicIPExists: updating the PIP %s for the incoming service %s", pipName, serviceName) + logger.V(2).Info("updating the PIP for the incoming service", "pipName", pipName, "serviceName", serviceName) err = az.CreateOrUpdatePIP(service, pipResourceGroup, pip) if err != nil { return nil, err @@ -1204,7 +1210,7 @@ func (az *Cloud) ensurePublicIPExists(ctx context.Context, service *v1.Service, } } - klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - updating", serviceName, ptr.Deref(pip.Name, "")) + logger.V(2).Info("updating", "serviceName", serviceName, "pipName", ptr.Deref(pip.Name, "")) if pip.Properties == nil { pip.Properties = &armnetwork.PublicIPAddressPropertiesFormat{ PublicIPAllocationMethod: to.Ptr(armnetwork.IPAllocationMethodStatic), @@ -1223,7 +1229,7 @@ func (az *Cloud) ensurePublicIPExists(ctx context.Context, service *v1.Service, Location: ptr.To(az.Location), } if az.HasExtendedLocation() { - klog.V(2).Infof("Using extended location with name %s, and type %s for PIP", az.ExtendedLocationName, az.ExtendedLocationType) + logger.V(2).Info("Using extended location for PIP", "name", az.ExtendedLocationName, "type", az.ExtendedLocationType) var typ *armnetwork.ExtendedLocationTypes if getExtendedLocationTypeFromString(az.ExtendedLocationType) == armnetwork.ExtendedLocationTypesEdgeZone { typ = to.Ptr(armnetwork.ExtendedLocationTypesEdgeZone) @@ -1267,7 +1273,7 @@ func (az *Cloud) ensurePublicIPExists(ctx context.Context, service *v1.Service, } } } - klog.V(2).Infof("ensurePublicIPExists for service(%s): pip(%s) - creating", serviceName, *pip.Name) + logger.V(2).Info("creating", "serviceName", serviceName, "pipName", *pip.Name) } if !isUserAssignedPIP && az.ensurePIPTagged(service, pip) { changed = true @@ -1292,14 +1298,14 @@ func (az *Cloud) ensurePublicIPExists(ctx context.Context, service *v1.Service, } if changed { - klog.V(2).Infof("CreateOrUpdatePIP(%s, %q): start", pipResourceGroup, *pip.Name) + logger.V(2).Info("CreateOrUpdatePIP: start", "pipResourceGroup", pipResourceGroup, "pipName", *pip.Name) err = az.CreateOrUpdatePIP(service, pipResourceGroup, pip) if err != nil { - klog.V(2).Infof("ensure(%s) abort backoff: pip(%s)", serviceName, *pip.Name) + logger.V(2).Info("ensure service abort backoff: pip", "serviceName", serviceName, "pipName", *pip.Name) return nil, err } - klog.V(10).Infof("CreateOrUpdatePIP(%s, 
%q): end", pipResourceGroup, *pip.Name) + logger.V(10).Info("CreateOrUpdatePIP: end", "pipResourceGroup", pipResourceGroup, "pipName", *pip.Name) } pip, rerr := az.NetworkClientFactory.GetPublicIPAddressClient().Get(ctx, pipResourceGroup, *pip.Name, nil) @@ -1310,13 +1316,14 @@ func (az *Cloud) ensurePublicIPExists(ctx context.Context, service *v1.Service, } func (az *Cloud) reconcileIPSettings(pip *armnetwork.PublicIPAddress, service *v1.Service, isIPv6 bool) bool { + logger := log.Background().WithName("reconcileIPSettings") var changed bool serviceName := getServiceName(service) if isIPv6 { if !strings.EqualFold(string(*pip.Properties.PublicIPAddressVersion), string(armnetwork.IPVersionIPv6)) { pip.Properties.PublicIPAddressVersion = to.Ptr(armnetwork.IPVersionIPv6) - klog.V(2).Infof("service(%s): pip(%s) - should be created as IPv6", serviceName, *pip.Name) + logger.V(2).Info("should be created as IPv6", "serviceName", serviceName, "pipName", *pip.Name) changed = true } @@ -1333,7 +1340,7 @@ func (az *Cloud) reconcileIPSettings(pip *armnetwork.PublicIPAddress, service *v } else { if !strings.EqualFold(string(*pip.Properties.PublicIPAddressVersion), string(armnetwork.IPVersionIPv4)) { pip.Properties.PublicIPAddressVersion = to.Ptr(armnetwork.IPVersionIPv4) - klog.V(2).Infof("service(%s): pip(%s) - should be created as IPv4", serviceName, *pip.Name) + logger.V(2).Info("should be created as IPv4", "serviceName", serviceName, "pipName", *pip.Name) changed = true } } @@ -1346,6 +1353,7 @@ func reconcileDNSSettings( domainNameLabel, serviceName, pipName string, isUserAssignedPIP bool, ) (bool, error) { + logger := log.Background().WithName("reconcileDNSSettings") var changed bool if existingServiceName := getServiceFromPIPDNSTags(pip.Tags); existingServiceName != "" && !strings.EqualFold(existingServiceName, serviceName) { @@ -1360,7 +1368,7 @@ func reconcileDNSSettings( } else { if pip.Properties.DNSSettings == nil || pip.Properties.DNSSettings.DomainNameLabel == nil { - klog.V(6).Infof("ensurePublicIPExists for service(%s): pip(%s) - no existing DNS label on the public IP, create one", serviceName, pipName) + logger.V(6).Info("ensurePublicIPExists - no existing DNS label on the public IP, create one", "serviceName", serviceName, "pipName", pipName) pip.Properties.DNSSettings = &armnetwork.PublicIPAddressDNSSettings{ DomainNameLabel: &domainNameLabel, } @@ -1730,6 +1738,7 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurations( existingLBs []*armnetwork.LoadBalancer, nodes []*v1.Node, ) (err error) { + logger := log.Background().WithName("reconcileMultipleStandardLoadBalancerConfigurations") if !az.UseMultipleStandardLoadBalancers() { return nil } @@ -1766,7 +1775,7 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurations( prefix := az.GetLoadBalancerName(ctx, "", &svc) svcName := getServiceName(&svc) rulePrefixToSVCNameMap[strings.ToLower(prefix)] = svcName - klog.V(2).Infof("reconcileMultipleStandardLoadBalancerConfigurations: found service %q with prefix %q", svcName, prefix) + logger.V(2).Info("found service with prefix", "service", svcName, "prefix", prefix) } } @@ -1782,16 +1791,13 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurations( } svcName, ok := rulePrefixToSVCNameMap[strings.ToLower(rulePrefix)] if ok { - klog.V(2).Infof( - "reconcileMultipleStandardLoadBalancerConfigurations: found load balancer %q with rule %q of service %q", - lbName, ruleName, svcName, - ) + logger.V(2).Info("found load balancer with rule of service", "load 
balancer", lbName, "rule", ruleName, "service", svcName) for i := range az.MultipleStandardLoadBalancerConfigurations { if strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), az.MultipleStandardLoadBalancerConfigurations[i].Name) { az.multipleStandardLoadBalancersActiveServicesLock.Lock() az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices, svcName) az.multipleStandardLoadBalancersActiveServicesLock.Unlock() - klog.V(2).Infof("reconcileMultipleStandardLoadBalancerConfigurations: service(%s) is active on lb(%s)", svcName, lbName) + logger.V(2).Info("service is active on lb", "service", svcName, "load balancer", lbName) } } } @@ -1807,9 +1813,10 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurations( // This entails adding rules/probes for expected Ports and removing stale rules/ports. // nodes only used if wantLb is true func (az *Cloud) reconcileLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node, wantLb bool) (*armnetwork.LoadBalancer, bool /*needRetry*/, error) { + logger := log.Background().WithName("reconcileLoadBalancer") isBackendPoolPreConfigured := az.isBackendPoolPreConfigured(service) serviceName := getServiceName(service) - klog.V(2).Infof("reconcileLoadBalancer for service(%s) - wantLb(%t): started", serviceName, wantLb) + logger.V(2).Info("started", "serviceName", serviceName, "wantLb", wantLb) existingLBs, err := az.ListManagedLBs(ctx, service, nodes, clusterName) if err != nil { @@ -1843,7 +1850,7 @@ func (az *Cloud) reconcileLoadBalancer(ctx context.Context, clusterName string, return nil, false, err } if deletedPLS { - klog.V(2).InfoS("reconcileLoadBalancer: PLS is deleted and the LB ETag has changed, need to retry", "service", serviceName) + logger.V(2).Info("PLS is deleted and the LB ETag has changed, need to retry", "service", serviceName) return lb, true, nil } existingLBs = newLBs @@ -1851,8 +1858,7 @@ func (az *Cloud) reconcileLoadBalancer(ctx context.Context, clusterName string, lbName := *lb.Name lbResourceGroup := az.getLoadBalancerResourceGroup() lbBackendPoolIDs := az.getBackendPoolIDsForService(service, clusterName, lbName) - klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s/%s) wantLb(%t) resolved load balancer name", - serviceName, lbResourceGroup, lbName, wantLb) + logger.V(2).Info("resolved load balancer name", "service", serviceName, "lbResourceGroup", lbResourceGroup, "lbName", lbName, "wantLb", wantLb) lbFrontendIPConfigNames := az.getFrontendIPConfigNames(service) lbFrontendIPConfigIDs := map[bool]string{ consts.IPVersionIPv4: az.getFrontendIPConfigID(lbName, lbFrontendIPConfigNames[consts.IPVersionIPv4]), @@ -1974,7 +1980,7 @@ func (az *Cloud) reconcileLoadBalancer(ctx context.Context, clusterName string, } } if needRetry { - klog.V(2).InfoS("reconcileLoadBalancer: PLS is deleted and the LB ETag has changed, need to retry", "service", serviceName) + logger.V(2).Info("PLS is deleted and the LB ETag has changed, need to retry", "service", serviceName) return lb, true, nil } } @@ -1986,7 +1992,7 @@ func (az *Cloud) reconcileLoadBalancer(ctx context.Context, clusterName string, return nil, false, err } } else { - klog.V(2).Infof("reconcileLoadBalancer: reconcileLoadBalancer for service(%s): lb(%s) - updating", serviceName, lbName) + logger.V(2).Info("updating", "service", serviceName, "load balancer", lbName) err := az.CreateOrUpdateLB(ctx, service, *lb) if 
err != nil { klog.Errorf("reconcileLoadBalancer for service(%s) abort backoff: lb(%s) - updating: %s", serviceName, lbName, err.Error()) @@ -2042,7 +2048,7 @@ func (az *Cloud) reconcileLoadBalancer(ctx context.Context, clusterName string, az.reconcileMultipleStandardLoadBalancerConfigurationStatus(wantLb, serviceName, lbName) } - klog.V(2).Infof("reconcileLoadBalancer for service(%s): lb(%s) finished", serviceName, lbName) + logger.V(2).Info("finished", "serviceName", serviceName, "lbName", lbName) return lb, false, nil } @@ -2113,9 +2119,10 @@ func removeLBFromList(lbs *[]*armnetwork.LoadBalancer, lbName string) { // removeNodeFromLBConfig searches for the occurrence of the given node in the lb configs and removes it func (az *Cloud) removeNodeFromLBConfig(nodeNameToLBConfigIDXMap map[string]int, nodeName string) { + logger := log.Background().WithName("removeNodeFromLBConfig") if idx, ok := nodeNameToLBConfigIDXMap[nodeName]; ok { currentLBConfigName := az.MultipleStandardLoadBalancerConfigurations[idx].Name - klog.V(4).Infof("reconcileMultipleStandardLoadBalancerBackendNodes: remove node(%s) on lb(%s)", nodeName, currentLBConfigName) + logger.V(4).Info("reconcileMultipleStandardLoadBalancerBackendNodes: remove node on lb", "node", nodeName, "lb", currentLBConfigName) az.multipleStandardLoadBalancersActiveNodesLock.Lock() az.MultipleStandardLoadBalancerConfigurations[idx].ActiveNodes.Delete(strings.ToLower(nodeName)) az.multipleStandardLoadBalancersActiveNodesLock.Unlock() @@ -2125,7 +2132,7 @@ func (az *Cloud) removeNodeFromLBConfig(nodeNameToLBConfigIDXMap map[string]int, // removeDeletedNodesFromLoadBalancerConfigurations removes the deleted nodes // that do not exist in nodes list from the load balancer configurations func (az *Cloud) removeDeletedNodesFromLoadBalancerConfigurations(nodes []*v1.Node) map[string]int { - logger := klog.Background().WithName("removeDeletedNodesFromLoadBalancerConfigurations") + logger := log.Background().WithName("removeDeletedNodesFromLoadBalancerConfigurations") nodeNamesSet := utilsets.NewString() for _, node := range nodes { nodeNamesSet.Insert(node.Name) @@ -2162,6 +2169,7 @@ func (az *Cloud) accommodateNodesByPrimaryVMSet( nodes []*v1.Node, nodeNameToLBConfigIDXMap map[string]int, ) error { + logger := log.Background().WithName("accommodateNodesByPrimaryVMSet") for _, node := range nodes { if _, ok := az.nodesWithCorrectLoadBalancerByPrimaryVMSet.Load(strings.ToLower(node.Name)); ok { continue @@ -2178,13 +2186,13 @@ func (az *Cloud) accommodateNodesByPrimaryVMSet( if strings.EqualFold(multiSLBConfig.PrimaryVMSet, vmSetName) { foundPrimaryLB := isLBInList(lbs, multiSLBConfig.Name) if !foundPrimaryLB && !strings.EqualFold(trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix), multiSLBConfig.Name) { - klog.V(4).Infof("accommodateNodesByPrimaryVMSet: node(%s) should be on lb(%s) because of primary vmSet (%s), but the lb is not found and will not be created this time, will ignore the primaryVMSet", node.Name, multiSLBConfig.Name, vmSetName) + logger.V(4).Info("node should be on lb because of primary vmSet, but the lb is not found and will not be created this time, will ignore the primaryVMSet", "node", node.Name, "lb", multiSLBConfig.Name, "vmSetName", vmSetName) continue } az.nodesWithCorrectLoadBalancerByPrimaryVMSet.Store(strings.ToLower(node.Name), struct{}{}) if !multiSLBConfig.ActiveNodes.Has(node.Name) { - klog.V(4).Infof("accommodateNodesByPrimaryVMSet: node(%s) should be on lb(%s) because of primary vmSet (%s)", node.Name, 
multiSLBConfig.Name, vmSetName) + logger.V(4).Info("node should be on lb because of primary vmSet", "node", node.Name, "lb", multiSLBConfig.Name, "vmSetName", vmSetName) az.removeNodeFromLBConfig(nodeNameToLBConfigIDXMap, node.Name) @@ -2298,7 +2306,7 @@ func (az *Cloud) accommodateNodesByNodeSelector( continue } - klog.V(4).Infof("accommodateNodesByNodeSelector: node(%s) should be on lb(%s) it is the eligible LB with fewest number of nodes", node.Name, az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].Name) + logger.V(4).Info("node should be on lb as it is the eligible LB with fewest number of nodes", "node", node.Name, "lb", az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].Name) az.multipleStandardLoadBalancersActiveNodesLock.Lock() az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].ActiveNodes = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[minNodesIDX].ActiveNodes, node.Name) az.multipleStandardLoadBalancersActiveNodesLock.Unlock() @@ -2406,16 +2414,17 @@ func (az *Cloud) recordExistingNodesOnLoadBalancers(clusterName string, lbs []*a } func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurationStatus(wantLb bool, svcName, lbName string) { + logger := log.Background().WithName("reconcileMultipleStandardLoadBalancerConfigurationStatus") lbName = trimSuffixIgnoreCase(lbName, consts.InternalLoadBalancerNameSuffix) for i := range az.MultipleStandardLoadBalancerConfigurations { if strings.EqualFold(lbName, az.MultipleStandardLoadBalancerConfigurations[i].Name) { az.multipleStandardLoadBalancersActiveServicesLock.Lock() if wantLb { - klog.V(4).Infof("reconcileMultipleStandardLoadBalancerConfigurationStatus: service(%s) is active on lb(%s)", svcName, lbName) + logger.V(4).Info("service is active on lb", "service", svcName, "lb", lbName) az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices = utilsets.SafeInsert(az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices, svcName) } else { - klog.V(4).Infof("reconcileMultipleStandardLoadBalancerConfigurationStatus: service(%s) is not active on lb(%s) any more", svcName, lbName) + logger.V(4).Info("service is not active on lb any more", "service", svcName, "lb", lbName) az.MultipleStandardLoadBalancerConfigurations[i].ActiveServices.Delete(svcName) } az.multipleStandardLoadBalancersActiveServicesLock.Unlock() @@ -2425,6 +2434,7 @@ func (az *Cloud) reconcileMultipleStandardLoadBalancerConfigurationStatus(wantLb } func (az *Cloud) reconcileLBProbes(lb *armnetwork.LoadBalancer, service *v1.Service, serviceName string, wantLb bool, expectedProbes []*armnetwork.Probe) bool { + logger := log.Background().WithName("reconcileLBProbes") expectedProbes, _ = az.keepSharedProbe(service, *lb, expectedProbes, wantLb) // remove unwanted probes @@ -2436,15 +2446,15 @@ func (az *Cloud) reconcileLBProbes(lb *armnetwork.LoadBalancer, service *v1.Serv for i := len(updatedProbes) - 1; i >= 0; i-- { existingProbe := updatedProbes[i] if az.serviceOwnsRule(service, *existingProbe.Name) { - klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - considering evicting", serviceName, wantLb, *existingProbe.Name) + logger.V(10).Info("considering evicting", "service", serviceName, "wantLb", wantLb, "probeName", *existingProbe.Name) keepProbe := false if findProbe(expectedProbes, existingProbe) { - klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - keeping", serviceName, wantLb, *existingProbe.Name) + logger.V(10).Info("keeping", "service", serviceName, 
"wantLb", wantLb, "probeName", *existingProbe.Name) keepProbe = true } if !keepProbe { updatedProbes = append(updatedProbes[:i], updatedProbes[i+1:]...) - klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - dropping", serviceName, wantLb, *existingProbe.Name) + logger.V(2).Info("dropping", "service", serviceName, "wantLb", wantLb, "probeName", *existingProbe.Name) dirtyProbes = true } } @@ -2453,24 +2463,25 @@ func (az *Cloud) reconcileLBProbes(lb *armnetwork.LoadBalancer, service *v1.Serv for _, expectedProbe := range expectedProbes { foundProbe := false if findProbe(updatedProbes, expectedProbe) { - klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - already exists", serviceName, wantLb, *expectedProbe.Name) + logger.V(10).Info("already exists", "service", serviceName, "wantLb", wantLb, "probeName", *expectedProbe.Name) foundProbe = true } if !foundProbe { - klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb probe(%s) - adding", serviceName, wantLb, *expectedProbe.Name) + logger.V(10).Info("adding", "service", serviceName, "wantLb", wantLb, "probeName", *expectedProbe.Name) updatedProbes = append(updatedProbes, expectedProbe) dirtyProbes = true } } if dirtyProbes { probesJSON, _ := json.Marshal(expectedProbes) - klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb probes updated: %s", serviceName, wantLb, string(probesJSON)) + logger.V(2).Info("updated", "service", serviceName, "wantLb", wantLb, "probes", string(probesJSON)) lb.Properties.Probes = updatedProbes } return dirtyProbes } func (az *Cloud) reconcileLBRules(lb *armnetwork.LoadBalancer, service *v1.Service, serviceName string, wantLb bool, expectedRules []*armnetwork.LoadBalancingRule) bool { + logger := log.Background().WithName("reconcileLBRules") // update rules dirtyRules := false var updatedRules []*armnetwork.LoadBalancingRule @@ -2483,13 +2494,13 @@ func (az *Cloud) reconcileLBRules(lb *armnetwork.LoadBalancer, service *v1.Servi existingRule := updatedRules[i] if az.serviceOwnsRule(service, *existingRule.Name) { keepRule := false - klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - considering evicting", serviceName, wantLb, *existingRule.Name) + logger.V(10).Info("considering evicting", "service", serviceName, "wantLb", wantLb, "rule", *existingRule.Name) if findRule(expectedRules, existingRule, wantLb) { - klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - keeping", serviceName, wantLb, *existingRule.Name) + logger.V(10).Info("keeping", "service", serviceName, "wantLb", wantLb, "rule", *existingRule.Name) keepRule = true } if !keepRule { - klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - dropping", serviceName, wantLb, *existingRule.Name) + logger.V(2).Info("dropping", "service", serviceName, "wantLb", wantLb, "rule", *existingRule.Name) updatedRules = append(updatedRules[:i], updatedRules[i+1:]...) 
dirtyRules = true } @@ -2499,18 +2510,18 @@ func (az *Cloud) reconcileLBRules(lb *armnetwork.LoadBalancer, service *v1.Servi for _, expectedRule := range expectedRules { foundRule := false if findRule(updatedRules, expectedRule, wantLb) { - klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) - already exists", serviceName, wantLb, *expectedRule.Name) + logger.V(10).Info("already exists", "service", serviceName, "wantLb", wantLb, "rule", *expectedRule.Name) foundRule = true } if !foundRule { - klog.V(10).Infof("reconcileLoadBalancer for service (%s)(%t): lb rule(%s) adding", serviceName, wantLb, *expectedRule.Name) + logger.V(10).Info("adding", "service", serviceName, "wantLb", wantLb, "rule", *expectedRule.Name) updatedRules = append(updatedRules, expectedRule) dirtyRules = true } } if dirtyRules { ruleJSON, _ := json.Marshal(expectedRules) - klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb rules updated: %s", serviceName, wantLb, string(ruleJSON)) + logger.V(2).Info("updated", "service", serviceName, "wantLb", wantLb, "rules", string(ruleJSON)) lb.Properties.LoadBalancingRules = updatedRules } return dirtyRules @@ -2525,6 +2536,7 @@ func (az *Cloud) reconcileFrontendIPConfigs( wantLb bool, lbFrontendIPConfigNames map[bool]string, ) ([]*armnetwork.FrontendIPConfiguration, []*armnetwork.FrontendIPConfiguration, bool, error) { + logger := log.Background().WithName("reconcileFrontendIPConfigs") var err error lbName := *lb.Name serviceName := getServiceName(service) @@ -2557,9 +2569,9 @@ func (az *Cloud) reconcileFrontendIPConfigs( var configNameToBeDeleted string if newConfigs[i].Name != nil { configNameToBeDeleted = *newConfigs[i].Name - klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, configNameToBeDeleted) + logger.V(2).Info("dropping", "service", serviceName, "wantLb", wantLb, "configNameToBeDeleted", configNameToBeDeleted) } else { - klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): nil name of lb frontendconfig", serviceName, wantLb) + logger.V(2).Info("nil name", "service", serviceName, "wantLb", wantLb) } toDeleteConfigs = append(toDeleteConfigs, newConfigs[i]) @@ -2601,10 +2613,10 @@ func (az *Cloud) reconcileFrontendIPConfigs( config := newConfigs[i] isServiceOwnsFrontendIP, _, fipIPVersion := az.serviceOwnsFrontendIP(ctx, config, service) if !isServiceOwnsFrontendIP { - klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): the frontend IP configuration %s does not belong to the service", serviceName, ptr.Deref(config.Name, "")) + logger.V(4).Info("the frontend IP configuration does not belong to the service", "service", serviceName, "config", ptr.Deref(config.Name, "")) continue } - klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): checking owned frontend IP configuration %s", serviceName, ptr.Deref(config.Name, "")) + logger.V(4).Info("checking owned frontend IP configuration", "service", serviceName, "config", ptr.Deref(config.Name, "")) var isIPv6 bool var err error if fipIPVersion != nil { @@ -2620,7 +2632,7 @@ func (az *Cloud) reconcileFrontendIPConfigs( return nil, toDeleteConfigs, false, err } if isFipChanged { - klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - dropping", serviceName, wantLb, *config.Name) + logger.V(2).Info("dropping", "service", serviceName, "wantLb", wantLb, "config", *config.Name) toDeleteConfigs = append(toDeleteConfigs, newConfigs[i]) newConfigs = append(newConfigs[:i], 
newConfigs[i+1:]...) dirtyConfigs = true @@ -2637,8 +2649,7 @@ func (az *Cloud) reconcileFrontendIPConfigs( } addNewFIPOfService := func(isIPv6 bool) error { - klog.V(4).Infof("ensure(%s): lb(%s) - creating a new frontend IP config %q (isIPv6=%t)", - serviceName, lbName, lbFrontendIPConfigNames[isIPv6], isIPv6) + logger.V(4).Info("creating a new frontend IP config", "ensure service", serviceName, "lb", lbName, "config", lbFrontendIPConfigNames[isIPv6], "isIPv6", isIPv6) // construct FrontendIPConfigurationPropertiesFormat var fipConfigurationProperties *armnetwork.FrontendIPConfigurationPropertiesFormat @@ -2664,20 +2675,20 @@ func (az *Cloud) reconcileFrontendIPConfigs( return privateIP != "" } if loadBalancerIP != "" { - klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): use loadBalancerIP %q from Service spec", serviceName, loadBalancerIP) + logger.V(4).Info("use loadBalancerIP from Service spec", "service", serviceName, "loadBalancerIP", loadBalancerIP) configProperties.PrivateIPAllocationMethod = to.Ptr(armnetwork.IPAllocationMethodStatic) configProperties.PrivateIPAddress = &loadBalancerIP } else if status != nil && len(status.Ingress) > 0 && ingressIPInSubnet(status.Ingress) { - klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): keep the original private IP %s", serviceName, privateIP) + logger.V(4).Info("keep the original private IP", "service", serviceName, "privateIP", privateIP) configProperties.PrivateIPAllocationMethod = to.Ptr(armnetwork.IPAllocationMethodStatic) configProperties.PrivateIPAddress = ptr.To(privateIP) } else if len(service.Status.LoadBalancer.Ingress) > 0 && ingressIPInSubnet(service.Status.LoadBalancer.Ingress) { - klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): keep the original private IP %s from service.status.loadbalacner.ingress", serviceName, privateIP) + logger.V(4).Info("keep the original private IP from service.status.loadbalacner.ingress", "service", serviceName, "privateIP", privateIP) configProperties.PrivateIPAllocationMethod = to.Ptr(armnetwork.IPAllocationMethodStatic) configProperties.PrivateIPAddress = ptr.To(privateIP) } else { // We'll need to call GetLoadBalancer later to retrieve allocated IP. - klog.V(4).Infof("reconcileFrontendIPConfigs for service (%s): dynamically allocate the private IP", serviceName) + logger.V(4).Info("dynamically allocate the private IP", "service", serviceName) configProperties.PrivateIPAllocationMethod = to.Ptr(armnetwork.IPAllocationMethodDynamic) } @@ -2710,7 +2721,7 @@ func (az *Cloud) reconcileFrontendIPConfigs( } } newConfigs = append(newConfigs, newConfig) - klog.V(2).Infof("reconcileLoadBalancer for service (%s)(%t): lb frontendconfig(%s) - adding", serviceName, wantLb, lbFrontendIPConfigNames[isIPv6]) + logger.V(2).Info("lb frontendconfig - adding", "service", serviceName, "wantLb", wantLb, "config", lbFrontendIPConfigNames[isIPv6]) dirtyConfigs = true return nil } @@ -2742,6 +2753,7 @@ func (az *Cloud) getFrontendZones( isFipChanged bool, serviceName, lbFrontendIPConfigName string, ) error { + logger := log.Background().WithName("getFrontendZones") if !isFipChanged { // fetch zone information from API for new frontends // only add zone information for new internal frontend IP configurations for standard load balancer not deployed to an edge zone. 
location := az.Location @@ -2754,10 +2766,10 @@ func (az *Cloud) getFrontendZones( } } else { if previousZone == nil { // keep the existing zone information for existing frontends - klog.V(2).Infof("getFrontendZones for service (%s): lb frontendconfig(%s): setting zone to nil", serviceName, lbFrontendIPConfigName) + logger.V(2).Info("setting zone to nil", "service", serviceName, "lbFrontendIPConfig", lbFrontendIPConfigName) } else { zoneStr := strings.Join(lo.FromSlicePtr(previousZone), ",") - klog.V(2).Infof("getFrontendZones for service (%s): lb frontendconfig(%s): setting zone to %s", serviceName, lbFrontendIPConfigName, zoneStr) + logger.V(2).Info("setting zone", "service", serviceName, "lbFrontendIPConfig", lbFrontendIPConfigName, "zone", zoneStr) } fipConfig.Zones = previousZone } @@ -2875,6 +2887,7 @@ func (az *Cloud) getExpectedLBRules( lbName string, isIPv6 bool, ) ([]*armnetwork.Probe, []*armnetwork.LoadBalancingRule, error) { + logger := log.Background().WithName("getExpectedLBRules") var expectedRules []*armnetwork.LoadBalancingRule var expectedProbes []*armnetwork.Probe @@ -2917,7 +2930,7 @@ func (az *Cloud) getExpectedLBRules( consts.IsK8sServiceHasHAModeEnabled(service) { lbRuleName := az.getloadbalancerHAmodeRuleName(service, isIPv6) - klog.V(2).Infof("getExpectedLBRules lb name (%s) rule name (%s)", lbName, lbRuleName) + logger.V(2).Info("building expected rule", "lbName", lbName, "ruleName", lbRuleName) props, err := az.getExpectedHAModeLoadBalancingRuleProperties(service, lbFrontendIPConfigID, lbBackendPoolID) if err != nil { @@ -2958,7 +2971,7 @@ func (az *Cloud) getExpectedLBRules( for _, port := range service.Spec.Ports { lbRuleName := az.getLoadBalancerRuleName(service, port.Protocol, port.Port, isIPv6) - klog.V(2).Infof("getExpectedLBRules lb name (%s) rule name (%s)", lbName, lbRuleName) + logger.V(2).Info("building expected rule", "lbName", lbName, "ruleName", lbRuleName) isNoLBRuleRequired, err := consts.IsLBRuleOnK8sServicePortDisabled(service.Annotations, port.Port) if err != nil { err := fmt.Errorf("failed to parse annotation %s: %w", consts.BuildAnnotationKeyForPort(port.Port, consts.PortAnnotationNoLBRule), err) @@ -2966,7 +2979,7 @@ func (az *Cloud) getExpectedLBRules( "rule-name", lbRuleName, "port", port.Port) } if isNoLBRuleRequired { - klog.V(2).Infof("getExpectedLBRules lb name (%s) rule name (%s) no lb rule required", lbName, lbRuleName) + logger.V(2).Info("no lb rule required", "lbName", lbName, "ruleName", lbRuleName) continue } if port.Protocol == v1.ProtocolSCTP && (!az.UseStandardLoadBalancer() || !consts.IsK8sServiceUsingInternalLoadBalancer(service)) { @@ -3497,6 +3510,7 @@ func (az *Cloud) getPublicIPUpdates( serviceAnnotationRequestsNamedPublicIP, isIPv6 bool, ) (bool, []*armnetwork.PublicIPAddress, bool, []*armnetwork.PublicIPAddress, error) { + logger := log.Background().WithName("getPublicIPUpdates") var ( err error discoveredDesiredPublicIP bool @@ -3531,7 +3545,7 @@ func (az *Cloud) getPublicIPUpdates( dirtyPIP, toBeDeleted bool ) if !wantLb && !isUserAssignedPIP { - klog.V(2).Infof("reconcilePublicIP for service(%s): unbinding the service from pip %s", serviceName, *pip.Name) + logger.V(2).Info("reconcilePublicIP: unbinding the service from pip", "service", serviceName, "pip", *pip.Name) if serviceReferences, err = unbindServiceFromPIP(pip, serviceName, isUserAssignedPIP); err != nil { return false, nil, false, nil, err } @@ -3572,6 +3586,7 @@ func (az *Cloud) getPublicIPUpdates( // safeDeletePublicIP deletes public IP by removing its reference first. 
func (az *Cloud) safeDeletePublicIP(ctx context.Context, service *v1.Service, pipResourceGroup string, pip *armnetwork.PublicIPAddress, lb *armnetwork.LoadBalancer) error { + logger := log.Background().WithName("safeDeletePublicIP") // Remove references if pip.IPConfiguration is not nil. if pip.Properties != nil && pip.Properties.IPConfiguration != nil { @@ -3650,12 +3665,12 @@ func (az *Cloud) safeDeletePublicIP(ctx context.Context, service *v1.Service, pi } pipName := ptr.Deref(pip.Name, "") - klog.V(10).Infof("DeletePublicIP(%s, %q): start", pipResourceGroup, pipName) + logger.V(10).Info("start", "pipResourceGroup", pipResourceGroup, "pipName", pipName) err := az.DeletePublicIP(service, pipResourceGroup, pipName) if err != nil { return err } - klog.V(10).Infof("DeletePublicIP(%s, %q): end", pipResourceGroup, pipName) + logger.V(10).Info("end", "pipResourceGroup", pipResourceGroup, "pipName", pipName) return nil } @@ -3816,6 +3831,7 @@ func useSharedSecurityRule(service *v1.Service) bool { // 1. The serviceName is included in the service tags of a system-created pip. // 2. The service LoadBalancerIP matches the IP address of a user-created pip. func serviceOwnsPublicIP(service *v1.Service, pip *armnetwork.PublicIPAddress, clusterName string) (bool, bool) { + logger := log.Background().WithName("serviceOwnsPublicIP") if service == nil || pip == nil { klog.Warningf("serviceOwnsPublicIP: nil service or public IP") return false, false @@ -3835,7 +3851,7 @@ func serviceOwnsPublicIP(service *v1.Service, pip *armnetwork.PublicIPAddress, c if serviceTag == "" { // For user-created PIPs, we need a valid IP address to match against if pip.Properties == nil || ptr.Deref(pip.Properties.IPAddress, "") == "" { - klog.V(4).Infof("serviceOwnsPublicIP: empty pip.Properties.IPAddress for user-created PIP") + logger.V(4).Info("empty pip.Properties.IPAddress for user-created PIP") return false, true } return isServiceSelectPIP(service, pip, isIPv6), true @@ -3857,7 +3873,7 @@ func serviceOwnsPublicIP(service *v1.Service, pip *armnetwork.PublicIPAddress, c // or pip name, this could happen for secondary services // For secondary services, we need a valid IP address to match against if pip.Properties == nil || ptr.Deref(pip.Properties.IPAddress, "") == "" { - klog.V(4).Infof("serviceOwnsPublicIP: empty pip.Properties.IPAddress for secondary service check") + logger.V(4).Info("empty pip.Properties.IPAddress for secondary service check") return false, false } return isServiceSelectPIP(service, pip, isIPv6), false @@ -3866,7 +3882,7 @@ func serviceOwnsPublicIP(service *v1.Service, pip *armnetwork.PublicIPAddress, c // if the pip has no tags, it should be user-created // For user-created PIPs, we need a valid IP address to match against if pip.Properties == nil || ptr.Deref(pip.Properties.IPAddress, "") == "" { - klog.V(4).Infof("serviceOwnsPublicIP: empty pip.Properties.IPAddress for untagged PIP") + logger.V(4).Info("empty pip.Properties.IPAddress for untagged PIP") return false, true } return isServiceSelectPIP(service, pip, isIPv6), true @@ -3919,6 +3935,7 @@ func parsePIPServiceTag(serviceTag *string) []string { // example: // "ns1/svc1" + ["ns1/svc1", "ns2/svc2"] = "ns1/svc1,ns2/svc2" func bindServicesToPIP(pip *armnetwork.PublicIPAddress, incomingServiceNames []string, replace bool) (bool, error) { + logger := log.Background().WithName("bindServicesToPIP") if pip == nil { return false, fmt.Errorf("nil public IP") } @@ -3956,7 +3973,7 @@ func bindServicesToPIP(pip *armnetwork.PublicIPAddress, 
incomingServiceNames []s *serviceTagValue += fmt.Sprintf(",%s", serviceName) addedNew = true } else { - klog.V(10).Infof("service %s has been bound to the pip already", serviceName) + logger.V(10).Info("service has been bound to the pip already", "service", serviceName) } } } @@ -4081,9 +4098,10 @@ func getMostEligibleLBForService( existingLBs []*armnetwork.LoadBalancer, isInternal bool, ) string { + logger := log.Background().WithName("getMostEligibleLBForService") // 1. If the LB is eligible and being used, choose it. if StringInSlice(currentLBName, eligibleLBs) { - klog.V(4).Infof("getMostEligibleLBForService: choose %s as it is eligible and being used", currentLBName) + logger.V(4).Info("choose LB as it is eligible and being used", "currentLBName", currentLBName) return currentLBName } @@ -4100,7 +4118,7 @@ func getMostEligibleLBForService( } if !found { - klog.V(4).Infof("getMostEligibleLBForService: choose %s as it is eligible and not existing", eligibleLB) + logger.V(4).Info("choose LB as it is eligible and does not exist yet", "eligibleLB", eligibleLB) return eligibleLB } } @@ -4123,7 +4141,7 @@ func getMostEligibleLBForService( } if expectedLBName != "" { - klog.V(4).Infof("getMostEligibleLBForService: choose %s with fewest %d rules", expectedLBName, ruleCount) + logger.V(4).Info("choose LB with fewest rules", "expectedLBName", expectedLBName, "ruleCount", ruleCount) } return trimSuffixIgnoreCase(expectedLBName, consts.InternalLoadBalancerNameSuffix) @@ -4314,10 +4332,11 @@ func (az *Cloud) isLoadBalancerInUseByService(service *v1.Service, lbConfig conf // service. Hence, it can be tracked by the loadBalancer IP. // If the IP version is not empty, which means it is the secondary Service, it returns IP version of the Service FIP. func (az *Cloud) serviceOwnsFrontendIP(ctx context.Context, fip *armnetwork.FrontendIPConfiguration, service *v1.Service) (bool, bool, *armnetwork.IPVersion) { + logger := log.Background().WithName("serviceOwnsFrontendIP") var isPrimaryService bool baseName := az.GetLoadBalancerName(ctx, "", service) if fip != nil && strings.HasPrefix(ptr.Deref(fip.Name, ""), baseName) { - klog.V(6).Infof("serviceOwnsFrontendIP: found primary service %s of the frontend IP config %s", service.Name, *fip.Name) + logger.V(6).Info("found primary service of the frontend IP config", "service", service.Name, "frontendIPConfig", *fip.Name) isPrimaryService = true return true, isPrimaryService, nil } @@ -4357,8 +4376,9 @@ func (az *Cloud) serviceOwnsFrontendIP(ctx context.Context, fip *armnetwork.Fron if publicIPOwnsFrontendIP(service, fip, pip) { return true, isPrimaryService, pip.Properties.PublicIPAddressVersion } - klog.V(6).Infof("serviceOwnsFrontendIP: the public IP with ID %s is being referenced by other service with public IP address %s "+ "OR it is of incorrect IP version", *pip.ID, *pip.Properties.IPAddress) + logger.V(6).Info("the public IP is being referenced by another service "+ "or it is of an incorrect IP version", "pipID", *pip.ID, "pipIPAddress", *pip.Properties.IPAddress) } return false, isPrimaryService, nil diff --git a/pkg/provider/azure_loadbalancer_backendpool.go b/pkg/provider/azure_loadbalancer_backendpool.go index 0bbbad0555..0a8e59d6ac 100644 --- a/pkg/provider/azure_loadbalancer_backendpool.go +++ b/pkg/provider/azure_loadbalancer_backendpool.go @@ -83,6 +83,7 @@ func isLBBackendPoolsExisting(lbBackendPoolNames map[bool]string, bpName *string } func (bc *backendPoolTypeNodeIPConfig) CleanupVMSetFromBackendPoolByCondition(ctx
context.Context, slb *armnetwork.LoadBalancer, service *v1.Service, _ []*v1.Node, clusterName string, shouldRemoveVMSetFromSLB func(string) bool) (*armnetwork.LoadBalancer, error) { + logger := klog.Background().WithName("bc.CleanupVMSetFromBackendPoolByCondition") v4Enabled, v6Enabled := getIPFamiliesEnabled(service) lbBackendPoolNames := getBackendPoolNames(clusterName) @@ -95,7 +96,7 @@ func (bc *backendPoolTypeNodeIPConfig) CleanupVMSetFromBackendPoolByCondition(ct for j, bp := range newBackendPools { if found, _ := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name); found { - klog.V(2).Infof("bc.CleanupVMSetFromBackendPoolByCondition: checking the backend pool %s from standard load balancer %s", ptr.Deref(bp.Name, ""), ptr.Deref(slb.Name, "")) + logger.V(2).Info("checking the backend pool from standard load balancer", "backendPoolName", ptr.Deref(bp.Name, ""), "lbName", ptr.Deref(slb.Name, "")) if bp.Properties != nil && bp.Properties.BackendIPConfigurations != nil { for i := len(bp.Properties.BackendIPConfigurations) - 1; i >= 0; i-- { ipConf := (bp.Properties.BackendIPConfigurations)[i] @@ -106,7 +107,7 @@ func (bc *backendPoolTypeNodeIPConfig) CleanupVMSetFromBackendPoolByCondition(ct } if shouldRemoveVMSetFromSLB(vmSetName) { - klog.V(2).Infof("bc.CleanupVMSetFromBackendPoolByCondition: found unwanted vmSet %s, decouple it from the LB", vmSetName) + logger.V(2).Info("found unwanted vmSet, decouple it from the LB", "vmSetName", vmSetName) // construct a backendPool that only contains the IP config of the node to be deleted interfaceIPConfigToBeDeleted := &armnetwork.InterfaceIPConfiguration{ ID: ptr.To(ipConfigID), @@ -163,6 +164,7 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools( service *v1.Service, lb *armnetwork.LoadBalancer, ) (bool, bool, *armnetwork.LoadBalancer, error) { + logger := klog.Background().WithName("bc.ReconcileBackendPools") var newBackendPools []*armnetwork.BackendAddressPool var err error if lb.Properties.BackendAddressPools != nil { @@ -187,7 +189,7 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools( bp := newBackendPools[i] found, isIPv6 := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { - klog.V(10).Infof("bc.ReconcileBackendPools for service (%s): lb backendpool - found wanted backendpool. not adding anything", serviceName) + logger.V(10).Info("lb backendpool - found wanted backendpool. 
not adding anything", "serviceName", serviceName) foundBackendPools[isBackendPoolIPv6(ptr.Deref(bp.Name, ""))] = true // Don't bother to remove unused nodeIPConfiguration if backend pool is pre configured @@ -221,7 +223,7 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools( nodeName, _, err := bc.VMSet.GetNodeNameByIPConfigurationID(ctx, ipConfID) if err != nil { if errors.Is(err, cloudprovider.InstanceNotFound) { - klog.V(2).Infof("bc.ReconcileBackendPools for service (%s): vm not found for ipConfID %s", serviceName, ipConfID) + logger.V(2).Info("vm not found for ipConfID", "serviceName", serviceName, "ipConfID", ipConfID) bipConfigNotFound = append(bipConfigNotFound, ipConf) } else { return false, false, nil, err @@ -238,7 +240,7 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools( return false, false, nil, err } if shouldExcludeLoadBalancer { - klog.V(2).Infof("bc.ReconcileBackendPools for service (%s): lb backendpool - found unwanted node %s, decouple it from the LB %s", serviceName, nodeName, lbName) + logger.V(2).Info("lb backendpool - found unwanted node, decouple it from the LB", "serviceName", serviceName, "nodeName", nodeName, "lbName", lbName) // construct a backendPool that only contains the IP config of the node to be deleted bipConfigExclude = append(bipConfigExclude, &armnetwork.InterfaceIPConfiguration{ID: ptr.To(ipConfID)}) } @@ -255,7 +257,7 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools( lbBackendPoolIDsSlice = append(lbBackendPoolIDsSlice, lbBackendPoolIDs[isIPv6]) } } else { - klog.V(10).Infof("bc.ReconcileBackendPools for service (%s): lb backendpool - found unmanaged backendpool %s", serviceName, ptr.Deref(bp.Name, "")) + logger.V(10).Info("lb backendpool - found unmanaged backendpool", "serviceName", serviceName, "backendPoolName", ptr.Deref(bp.Name, "")) } } if len(backendpoolToBeDeleted) > 0 { @@ -270,7 +272,7 @@ func (bc *backendPoolTypeNodeIPConfig) ReconcileBackendPools( } if backendPoolsUpdated { - klog.V(4).Infof("bc.ReconcileBackendPools for service(%s): refreshing load balancer %s", serviceName, lbName) + logger.V(4).Info("refreshing load balancer", "serviceName", serviceName, "lbName", lbName) lb, _, err = bc.getAzureLoadBalancer(ctx, lbName, cache.CacheReadTypeForceRefresh) if err != nil { return false, false, nil, fmt.Errorf("bc.ReconcileBackendPools for service (%s): failed to get loadbalancer %s: %w", serviceName, lbName, err) @@ -301,6 +303,7 @@ func getBackendIPConfigurationsToBeDeleted( bp armnetwork.BackendAddressPool, bipConfigNotFound, bipConfigExclude []*armnetwork.InterfaceIPConfiguration, ) []*armnetwork.InterfaceIPConfiguration { + logger := klog.Background().WithName("getBackendIPConfigurationsToBeDeleted") if bp.Properties == nil || bp.Properties.BackendIPConfigurations == nil { return []*armnetwork.InterfaceIPConfiguration{} } @@ -332,13 +335,14 @@ func getBackendIPConfigurationsToBeDeleted( } } if len(unwantedIPConfigs) == len(ipConfigs) { - klog.V(2).Info("getBackendIPConfigurationsToBeDeleted: the pool is empty or will be empty after removing the unwanted IP addresses, skipping the removal") + logger.V(2).Info("the pool is empty or will be empty after removing the unwanted IP addresses, skipping the removal") return bipConfigToBeDeleted } return append(bipConfigToBeDeleted, unwantedIPConfigs...) 
} func (bc *backendPoolTypeNodeIPConfig) GetBackendPrivateIPs(ctx context.Context, clusterName string, service *v1.Service, lb *armnetwork.LoadBalancer) ([]string, []string) { + logger := klog.Background().WithName("bc.GetBackendPrivateIPs") serviceName := getServiceName(service) lbBackendPoolNames := getBackendPoolNames(clusterName) if lb.Properties == nil || lb.Properties.BackendAddressPools == nil { @@ -349,7 +353,7 @@ func (bc *backendPoolTypeNodeIPConfig) GetBackendPrivateIPs(ctx context.Context, for _, bp := range lb.Properties.BackendAddressPools { found, _ := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { - klog.V(10).Infof("bc.GetBackendPrivateIPs for service (%s): found wanted backendpool %s", serviceName, ptr.Deref(bp.Name, "")) + logger.V(10).Info("found wanted backendpool", "serviceName", serviceName, "backendPoolName", ptr.Deref(bp.Name, "")) if bp.Properties != nil && bp.Properties.BackendIPConfigurations != nil { for _, backendIPConfig := range bp.Properties.BackendIPConfigurations { ipConfigID := ptr.Deref(backendIPConfig.ID, "") @@ -365,7 +369,7 @@ func (bc *backendPoolTypeNodeIPConfig) GetBackendPrivateIPs(ctx context.Context, } privateIPs := privateIPsSet.UnsortedList() for _, ip := range privateIPs { - klog.V(2).Infof("bc.GetBackendPrivateIPs for service (%s): lb backendpool - found private IPs %s of node %s", serviceName, ip, nodeName) + logger.V(2).Info("lb backendpool - found private IPs of node", "serviceName", serviceName, "ip", ip, "nodeName", nodeName) if utilnet.IsIPv4String(ip) { backendPrivateIPv4s.Insert(ip) } else { @@ -375,7 +379,7 @@ func (bc *backendPoolTypeNodeIPConfig) GetBackendPrivateIPs(ctx context.Context, } } } else { - klog.V(10).Infof("bc.GetBackendPrivateIPs for service (%s): found unmanaged backendpool %s", serviceName, ptr.Deref(bp.Name, "")) + logger.V(10).Info("found unmanaged backendpool", "serviceName", serviceName, "backendPoolName", ptr.Deref(bp.Name, "")) } } return backendPrivateIPv4s.UnsortedList(), backendPrivateIPv6s.UnsortedList() @@ -403,6 +407,7 @@ func (az *Cloud) getVnetResourceID() string { } func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(ctx context.Context, service *v1.Service, nodes []*v1.Node, _, _, clusterName, lbName string, backendPool *armnetwork.BackendAddressPool) error { + logger := klog.Background().WithName("bi.EnsureHostsInPool") if backendPool == nil { backendPool = &armnetwork.BackendAddressPool{} } @@ -420,7 +425,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(ctx context.Context, service key := strings.ToLower(getServiceName(service)) si, found := bi.getLocalServiceInfo(key) if found && !strings.EqualFold(si.lbName, lbName) { - klog.V(4).InfoS("EnsureHostsInPool: the service is not on the load balancer", + logger.V(4).Info("the service is not on the load balancer", "service", key, "previous load balancer", lbName, "current load balancer", si.lbName) @@ -430,7 +435,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(ctx context.Context, service } if isNICPool(backendPool) { - klog.V(4).InfoS("EnsureHostsInPool: skipping NIC-based backend pool", "backendPoolName", ptr.Deref(backendPool.Name, "")) + logger.V(4).Info("skipping NIC-based backend pool", "backendPoolName", ptr.Deref(backendPool.Name, "")) return nil } } @@ -447,7 +452,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(ctx context.Context, service for _, loadBalancerBackendAddress := range backendPool.Properties.LoadBalancerBackendAddresses { if loadBalancerBackendAddress.Properties != nil && 
loadBalancerBackendAddress.Properties.IPAddress != nil { - klog.V(4).Infof("bi.EnsureHostsInPool: found existing IP %s in the backend pool %s", ptr.Deref(loadBalancerBackendAddress.Properties.IPAddress, ""), lbBackendPoolName) + logger.V(4).Info("found existing IP in the backend pool", "ip", ptr.Deref(loadBalancerBackendAddress.Properties.IPAddress, ""), "backendPoolName", lbBackendPoolName) existingIPs.Insert(ptr.Deref(loadBalancerBackendAddress.Properties.IPAddress, "")) } } @@ -456,7 +461,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(ctx context.Context, service nodePrivateIPsSet := utilsets.NewString() for _, node := range nodes { if isControlPlaneNode(node) { - klog.V(4).Infof("bi.EnsureHostsInPool: skipping control plane node %s", node.Name) + logger.V(4).Info("skipping control plane node", "nodeName", node.Name) continue } @@ -467,14 +472,14 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(ctx context.Context, service if bi.UseMultipleStandardLoadBalancers() { if activeNodes != nil && !activeNodes.Has(node.Name) { - klog.V(4).Infof("bi.EnsureHostsInPool: node %s should not be in load balancer %q", node.Name, lbName) + logger.V(4).Info("node should not be in load balancer", "nodeName", node.Name, "lbName", lbName) continue } } if !existingIPs.Has(privateIP) { name := node.Name - klog.V(6).Infof("bi.EnsureHostsInPool: adding %s with ip address %s", name, privateIP) + logger.V(6).Info("adding node with ip address", "nodeName", name, "ip", privateIP) nodeIPsToBeAdded = append(nodeIPsToBeAdded, privateIP) numOfAdd++ } @@ -485,7 +490,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(ctx context.Context, service for _, loadBalancerBackendAddress := range backendPool.Properties.LoadBalancerBackendAddresses { ip := ptr.Deref(loadBalancerBackendAddress.Properties.IPAddress, "") if !nodePrivateIPsSet.Has(ip) { - klog.V(4).Infof("bi.EnsureHostsInPool: removing IP %s because it is deleted or should be excluded", ip) + logger.V(4).Info("removing IP because it is deleted or should be excluded", "ip", ip) nodeIPsToBeDeleted = append(nodeIPsToBeDeleted, ip) changed = true numOfDelete++ @@ -496,7 +501,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(ctx context.Context, service continue } if !activeNodes.Has(nodeName) { - klog.V(4).Infof("bi.EnsureHostsInPool: removing IP %s because it should not be in this load balancer", ip) + logger.V(4).Info("removing IP because it should not be in this load balancer", "ip", ip) nodeIPsToBeDeleted = append(nodeIPsToBeDeleted, ip) changed = true numOfDelete++ @@ -506,7 +511,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(ctx context.Context, service removeNodeIPAddressesFromBackendPool(backendPool, nodeIPsToBeDeleted, false, bi.UseMultipleStandardLoadBalancers(), true) } if changed { - klog.V(2).Infof("bi.EnsureHostsInPool: updating backend pool %s of load balancer %s to add %d nodes and remove %d nodes", lbBackendPoolName, lbName, numOfAdd, numOfDelete) + logger.V(2).Info("updating backend pool of load balancer to add and remove nodes", "backendPoolName", lbBackendPoolName, "lbName", lbName, "numOfAdd", numOfAdd, "numOfDelete", numOfDelete) if err := bi.CreateOrUpdateLBBackendPool(ctx, lbName, backendPool); err != nil { return fmt.Errorf("bi.EnsureHostsInPool: failed to update backend pool %s: %w", lbBackendPoolName, err) } @@ -516,6 +521,7 @@ func (bi *backendPoolTypeNodeIP) EnsureHostsInPool(ctx context.Context, service } func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(ctx context.Context, slb 
*armnetwork.LoadBalancer, _ *v1.Service, nodes []*v1.Node, clusterName string, shouldRemoveVMSetFromSLB func(string) bool) (*armnetwork.LoadBalancer, error) { + logger := klog.Background().WithName("bi.CleanupVMSetFromBackendPoolByCondition") lbBackendPoolNames := getBackendPoolNames(clusterName) newBackendPools := make([]*armnetwork.BackendAddressPool, 0) if slb.Properties != nil && slb.Properties.BackendAddressPools != nil { @@ -526,7 +532,7 @@ func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(ctx cont for j, bp := range newBackendPools { found, isIPv6 := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { - klog.V(2).Infof("bi.CleanupVMSetFromBackendPoolByCondition: checking the backend pool %s from standard load balancer %s", ptr.Deref(bp.Name, ""), ptr.Deref(slb.Name, "")) + logger.V(2).Info("checking the backend pool from standard load balancer", "backendPoolName", ptr.Deref(bp.Name, ""), "lbName", ptr.Deref(slb.Name, "")) vmIPsToBeDeleted := utilsets.NewString() for _, node := range nodes { vmSetName, err := bi.VMSet.GetNodeVMSetName(ctx, node) @@ -536,7 +542,7 @@ func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(ctx cont if shouldRemoveVMSetFromSLB(vmSetName) { privateIP := getNodePrivateIPAddress(node, isIPv6) - klog.V(4).Infof("bi.CleanupVMSetFromBackendPoolByCondition: removing ip %s from the backend pool %s", privateIP, lbBackendPoolNames[isIPv6]) + logger.V(4).Info("removing ip from the backend pool", "ip", privateIP, "backendPoolName", lbBackendPoolNames[isIPv6]) vmIPsToBeDeleted.Insert(privateIP) } } @@ -553,12 +559,12 @@ func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(ctx cont newBackendPools[j] = bp } else { - klog.V(10).Infof("bi.CleanupVMSetFromBackendPoolByCondition: found unmanaged backendpool %s from standard load balancer %q", ptr.Deref(bp.Name, ""), ptr.Deref(slb.Name, "")) + logger.V(10).Info("found unmanaged backendpool from standard load balancer", "backendPoolName", ptr.Deref(bp.Name, ""), "lbName", ptr.Deref(slb.Name, "")) } } for isIPv6 := range updatedPrivateIPs { - klog.V(2).Infof("bi.CleanupVMSetFromBackendPoolByCondition: updating lb %s since there are private IP updates", ptr.Deref(slb.Name, "")) + logger.V(2).Info("updating lb since there are private IP updates", "lbName", ptr.Deref(slb.Name, "")) slb.Properties.BackendAddressPools = newBackendPools for _, backendAddressPool := range slb.Properties.BackendAddressPools { @@ -575,6 +581,7 @@ func (bi *backendPoolTypeNodeIP) CleanupVMSetFromBackendPoolByCondition(ctx cont } func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(ctx context.Context, clusterName string, service *v1.Service, lb *armnetwork.LoadBalancer) (bool, bool, *armnetwork.LoadBalancer, error) { + logger := klog.Background().WithName("bi.ReconcileBackendPools") var newBackendPools []*armnetwork.BackendAddressPool if lb.Properties.BackendAddressPools != nil { newBackendPools = lb.Properties.BackendAddressPools @@ -602,22 +609,22 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(ctx context.Context, clus found, isIPv6 := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { bpIdxes = append(bpIdxes, i) - klog.V(10).Infof("bi.ReconcileBackendPools for service (%s): found wanted backendpool. Not adding anything", serviceName) + logger.V(10).Info("found wanted backendpool. 
Not adding anything", "serviceName", serviceName) foundBackendPools[isIPv6] = true lbBackendPoolIDsSlice = append(lbBackendPoolIDsSlice, lbBackendPoolIDs[isIPv6]) if nicsCount := countNICsOnBackendPool(bp); nicsCount > 0 { nicsCountMap[ptr.Deref(bp.Name, "")] = nicsCount - klog.V(4).Infof( - "bi.ReconcileBackendPools for service(%s): found NIC-based backendpool %s with %d NICs, will migrate to IP-based", - serviceName, - ptr.Deref(bp.Name, ""), - nicsCount, + logger.V(4).Info( + "found NIC-based backendpool with NICs, will migrate to IP-based", + "serviceName", serviceName, + "backendPoolName", ptr.Deref(bp.Name, ""), + "nicsCount", nicsCount, ) isMigration = true } } else { - klog.V(10).Infof("bi.ReconcileBackendPools for service (%s): found unmanaged backendpool %s", serviceName, *bp.Name) + logger.V(10).Info("found unmanaged backendpool", "serviceName", serviceName, "backendPoolName", *bp.Name) } } @@ -660,7 +667,7 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(ctx context.Context, clus // VMs during the migration. // 3. Decouple vmss from the lb if the backend pool is empty when using // ip-based LB. Ref: https://github.com/kubernetes-sigs/cloud-provider-azure/pull/2829. - klog.V(2).Infof("bi.ReconcileBackendPools for service (%s) and vmSet (%s): ensuring the LB is decoupled from the VMSet", serviceName, vmSetName) + logger.V(2).Info("ensuring the LB is decoupled from the VMSet", "serviceName", serviceName, "vmSetName", vmSetName) shouldRefreshLB, err = bi.VMSet.EnsureBackendPoolDeleted(ctx, service, lbBackendPoolIDsSlice, vmSetName, lb.Properties.BackendAddressPools, true) if err != nil { klog.Errorf("bi.ReconcileBackendPools for service (%s): failed to EnsureBackendPoolDeleted: %s", serviceName, err.Error()) @@ -672,7 +679,7 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(ctx context.Context, clus var nodeIPAddressesToBeDeleted []string for _, nodeName := range bi.excludeLoadBalancerNodes.UnsortedList() { for _, ip := range bi.nodePrivateIPs[strings.ToLower(nodeName)].UnsortedList() { - klog.V(2).Infof("bi.ReconcileBackendPools for service (%s): found unwanted node private IP %s, decouple it from the LB %s", serviceName, ip, lbName) + logger.V(2).Info("found unwanted node private IP, decouple it from the LB", "serviceName", serviceName, "ip", ip, "lbName", lbName) nodeIPAddressesToBeDeleted = append(nodeIPAddressesToBeDeleted, ip) } } @@ -718,7 +725,7 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(ctx context.Context, clus shouldRefreshLB = shouldRefreshLB || isMigration if shouldRefreshLB { - klog.V(4).Infof("bi.ReconcileBackendPools for service(%s): refreshing load balancer %s", serviceName, lbName) + logger.V(4).Info("refreshing load balancer", "serviceName", serviceName, "lbName", lbName) lb, _, err = bi.getAzureLoadBalancer(ctx, lbName, cache.CacheReadTypeForceRefresh) if err != nil { return false, false, nil, fmt.Errorf("bi.ReconcileBackendPools for service (%s): failed to get loadbalancer %s: %w", serviceName, lbName, err) @@ -746,6 +753,7 @@ func (bi *backendPoolTypeNodeIP) ReconcileBackendPools(ctx context.Context, clus } func (bi *backendPoolTypeNodeIP) GetBackendPrivateIPs(_ context.Context, clusterName string, service *v1.Service, lb *armnetwork.LoadBalancer) ([]string, []string) { + logger := klog.Background().WithName("GetBackendPrivateIPs") serviceName := getServiceName(service) lbBackendPoolNames := bi.getBackendPoolNamesForService(service, clusterName) if lb.Properties == nil || lb.Properties.BackendAddressPools == nil { @@ -756,24 
+764,24 @@ func (bi *backendPoolTypeNodeIP) GetBackendPrivateIPs(_ context.Context, cluster for _, bp := range lb.Properties.BackendAddressPools { found, _ := isLBBackendPoolsExisting(lbBackendPoolNames, bp.Name) if found { - klog.V(10).Infof("bi.GetBackendPrivateIPs for service (%s): found wanted backendpool %s", serviceName, ptr.Deref(bp.Name, "")) + logger.V(10).Info("found wanted backendpool", "serviceName", serviceName, "backendPoolName", ptr.Deref(bp.Name, "")) if bp.Properties != nil && bp.Properties.LoadBalancerBackendAddresses != nil { for _, backendAddress := range bp.Properties.LoadBalancerBackendAddresses { ipAddress := backendAddress.Properties.IPAddress if ipAddress != nil { - klog.V(2).Infof("bi.GetBackendPrivateIPs for service (%s): lb backendpool - found private IP %q", serviceName, *ipAddress) + logger.V(2).Info("lb backendpool - found private IP", "serviceName", serviceName, "ip", *ipAddress) if utilnet.IsIPv4String(*ipAddress) { backendPrivateIPv4s.Insert(*ipAddress) } else if utilnet.IsIPv6String(*ipAddress) { backendPrivateIPv6s.Insert(*ipAddress) } } else { - klog.V(4).Infof("bi.GetBackendPrivateIPs for service (%s): lb backendpool - found null private IP", serviceName) + logger.V(4).Info("lb backendpool - found null private IP", "serviceName", serviceName) } } } } else { - klog.V(10).Infof("bi.GetBackendPrivateIPs for service (%s): found unmanaged backendpool %s", serviceName, ptr.Deref(bp.Name, "")) + logger.V(10).Info("found unmanaged backendpool", "serviceName", serviceName, "backendPoolName", ptr.Deref(bp.Name, "")) } } return backendPrivateIPv4s.UnsortedList(), backendPrivateIPv6s.UnsortedList() @@ -794,11 +802,12 @@ func (bi *backendPoolTypeNodeIP) getBackendPoolNodeNames(bp *armnetwork.BackendA } func newBackendPool(lb *armnetwork.LoadBalancer, isBackendPoolPreConfigured bool, preConfiguredBackendPoolLoadBalancerTypes, serviceName, lbBackendPoolName string) bool { + logger := klog.Background().WithName("newBackendPool") if isBackendPoolPreConfigured { - klog.V(2).Infof("newBackendPool for service (%s)(true): lb backendpool - PreConfiguredBackendPoolLoadBalancerTypes %s has been set but can not find corresponding backend pool %q, ignoring it", - serviceName, - preConfiguredBackendPoolLoadBalancerTypes, - lbBackendPoolName) + logger.V(2).Info("lb backendpool - PreConfiguredBackendPoolLoadBalancerTypes has been set but can not find corresponding backend pool, ignoring it", + "serviceName", serviceName, + "preConfiguredBackendPoolLoadBalancerTypes", preConfiguredBackendPoolLoadBalancerTypes, + "backendPoolName", lbBackendPoolName) isBackendPoolPreConfigured = false } @@ -815,6 +824,7 @@ func newBackendPool(lb *armnetwork.LoadBalancer, isBackendPoolPreConfigured bool } func (az *Cloud) addNodeIPAddressesToBackendPool(backendPool *armnetwork.BackendAddressPool, nodeIPAddresses []string) bool { + logger := klog.Background().WithName("bi.addNodeIPAddressesToBackendPool") vnetID := az.getVnetResourceID() if backendPool.Properties != nil { if backendPool.Properties.VirtualNetwork == nil || @@ -841,7 +851,7 @@ func (az *Cloud) addNodeIPAddressesToBackendPool(backendPool *armnetwork.Backend for _, ipAddress := range nodeIPAddresses { if !hasIPAddressInBackendPool(backendPool, ipAddress) { name := az.nodePrivateIPToNodeNameMap[ipAddress] - klog.V(4).Infof("bi.addNodeIPAddressesToBackendPool: adding %s to the backend pool %s", ipAddress, ptr.Deref(backendPool.Name, "")) + logger.V(4).Info("adding node to the backend pool", "ip", ipAddress, "backendPoolName", 
ptr.Deref(backendPool.Name, "")) addresses = append(addresses, &armnetwork.LoadBalancerBackendAddress{ Name: ptr.To(name), Properties: &armnetwork.LoadBalancerBackendAddressPropertiesFormat{ @@ -901,7 +911,7 @@ func removeNodeIPAddressesFromBackendPool( continue } if removeAll || nodeIPsSet.Has(ipAddress) { - klog.V(4).Infof("removeNodeIPAddressFromBackendPool: removing %s from the backend pool %s", ipAddress, ptr.Deref(backendPool.Name, "")) + logger.V(4).Info("removing IP from the backend pool", "ip", ipAddress, "backendPoolName", ptr.Deref(backendPool.Name, "")) addresses = append(addresses[:i], addresses[i+1:]...) changed = true } @@ -916,7 +926,7 @@ func removeNodeIPAddressesFromBackendPool( // Allow the pool to be empty when EnsureHostsInPool for multiple standard load balancers clusters, // or one node could occur in multiple backend pools. if len(addresses) == 0 && !UseMultipleStandardLoadBalancers { - klog.V(2).Info("removeNodeIPAddressFromBackendPool: the pool is empty or will be empty after removing the unwanted IP addresses, skipping the removal") + logger.V(2).Info("the pool is empty or will be empty after removing the unwanted IP addresses, skipping the removal") changed = false } else if changed { backendPool.Properties.LoadBalancerBackendAddresses = addresses diff --git a/pkg/provider/azure_loadbalancer_healthprobe.go b/pkg/provider/azure_loadbalancer_healthprobe.go index 26b0125721..87d7087dc2 100644 --- a/pkg/provider/azure_loadbalancer_healthprobe.go +++ b/pkg/provider/azure_loadbalancer_healthprobe.go @@ -48,8 +48,9 @@ func (az *Cloud) buildClusterServiceSharedProbe() *armnetwork.Probe { // for following protocols: TCP HTTP HTTPS(SLB only) // return nil if no new probe is added func (az *Cloud) buildHealthProbeRulesForPort(serviceManifest *v1.Service, port v1.ServicePort, lbrule string, healthCheckNodePortProbe *armnetwork.Probe, useSharedProbe bool) (*armnetwork.Probe, error) { + logger := klog.Background().WithName("buildHealthProbeRulesForPort") if useSharedProbe { - klog.V(4).Infof("skip creating health probe for port %d because the shared probe is used", port.Port) + logger.V(4).Info("skip creating health probe for port because the shared probe is used", "port", port.Port) return nil, nil } @@ -299,6 +300,7 @@ func (az *Cloud) keepSharedProbe( expectedProbes []*armnetwork.Probe, wantLB bool, ) ([]*armnetwork.Probe, error) { + logger := klog.Background().WithName("keepSharedProbe") var shouldConsiderRemoveSharedProbe bool if !wantLB { shouldConsiderRemoveSharedProbe = true @@ -321,8 +323,8 @@ func (az *Cloud) keepSharedProbe( // If the service owns the rule and is now a local service, // it means the service was switched from Cluster to Local if az.serviceOwnsRule(service, ruleName) && isLocalService(service) { - klog.V(2).Infof("service %s has switched from Cluster to Local, removing shared probe", - getServiceName(service)) + logger.V(2).Info("service has switched from Cluster to Local, removing shared probe", + "serviceName", getServiceName(service)) // Remove the shared probe from the load balancer directly if lb.Properties != nil && lb.Properties.Probes != nil && i < len(lb.Properties.Probes) { lb.Properties.Probes = append(lb.Properties.Probes[:i], lb.Properties.Probes[i+1:]...) 
@@ -340,7 +342,8 @@ func (az *Cloud) keepSharedProbe( return []*armnetwork.Probe{}, err } if !az.serviceOwnsRule(service, ruleName) && shouldConsiderRemoveSharedProbe { - klog.V(4).Infof("there are load balancing rule %s of another service referencing the health probe %s, so the health probe should not be removed", *rule.ID, *probe.ID) + logger.V(4).Info("there are load balancing rule of another service referencing the health probe, so the health probe should not be removed", + "ruleID", *rule.ID, "probeID", *probe.ID) sharedProbe := az.buildClusterServiceSharedProbe() expectedProbes = append(expectedProbes, sharedProbe) return expectedProbes, nil diff --git a/pkg/provider/azure_loadbalancer_repo.go b/pkg/provider/azure_loadbalancer_repo.go index c8951d3018..3d11842cda 100644 --- a/pkg/provider/azure_loadbalancer_repo.go +++ b/pkg/provider/azure_loadbalancer_repo.go @@ -35,6 +35,7 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/util/errutils" utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) @@ -56,6 +57,7 @@ func (az *Cloud) DeleteLB(ctx context.Context, service *v1.Service, lbName strin // ListLB invokes az.NetworkClientFactory.GetLoadBalancerClient().List with exponential backoff retry func (az *Cloud) ListLB(ctx context.Context, service *v1.Service) ([]*armnetwork.LoadBalancer, error) { + logger := log.Background().WithName("ListLB") rgName := az.getLoadBalancerResourceGroup() allLBs, rerr := az.NetworkClientFactory.GetLoadBalancerClient().List(ctx, rgName) if rerr != nil { @@ -66,13 +68,14 @@ func (az *Cloud) ListLB(ctx context.Context, service *v1.Service) ([]*armnetwork klog.Errorf("LoadbalancerClient.List(%v) failure with err=%v", rgName, rerr) return nil, rerr } - klog.V(2).Infof("LoadbalancerClient.List(%v) success", rgName) + logger.V(2).Info("LoadbalancerClient.List success", "resourceGroup", rgName) return allLBs, nil } // ListManagedLBs invokes az.NetworkClientFactory.GetLoadBalancerClient().List and filter out // those that are not managed by cloud provider azure or not associated to a managed VMSet. 
func (az *Cloud) ListManagedLBs(ctx context.Context, service *v1.Service, nodes []*v1.Node, clusterName string) ([]*armnetwork.LoadBalancer, error) { + logger := log.Background().WithName("ListManagedLBs") allLBs, err := az.ListLB(ctx, service) if err != nil { return nil, err @@ -88,7 +91,7 @@ func (az *Cloud) ListManagedLBs(ctx context.Context, service *v1.Service, nodes if strings.EqualFold(az.LoadBalancerSKU, consts.LoadBalancerSKUBasic) { // return early if wantLb=false if nodes == nil { - klog.V(4).Infof("ListManagedLBs: return all LBs in the resource group %s, including unmanaged LBs", az.getLoadBalancerResourceGroup()) + logger.V(4).Info("return all LBs in the resource group, including unmanaged LBs", "resourceGroup", az.getLoadBalancerResourceGroup()) return allLBs, nil } @@ -100,7 +103,7 @@ func (az *Cloud) ListManagedLBs(ctx context.Context, service *v1.Service, nodes if len(agentPoolVMSetNames) > 0 { for _, vmSetName := range agentPoolVMSetNames { - klog.V(6).Infof("ListManagedLBs: found agent pool vmSet name %s", *vmSetName) + logger.V(6).Info("found agent pool", "vmSet Name", *vmSetName) agentPoolVMSetNamesMap[strings.ToLower(*vmSetName)] = true } } @@ -119,7 +122,7 @@ func (az *Cloud) ListManagedLBs(ctx context.Context, service *v1.Service, nodes for _, lb := range allLBs { if managedLBNames.Has(trimSuffixIgnoreCase(ptr.Deref(lb.Name, ""), consts.InternalLoadBalancerNameSuffix)) { managedLBs = append(managedLBs, lb) - klog.V(4).Infof("ListManagedLBs: found managed LB %s", ptr.Deref(lb.Name, "")) + logger.V(4).Info("found managed LB", "loadBalancerName", ptr.Deref(lb.Name, "")) } } @@ -128,11 +131,12 @@ func (az *Cloud) ListManagedLBs(ctx context.Context, service *v1.Service, nodes // CreateOrUpdateLB invokes az.NetworkClientFactory.GetLoadBalancerClient().CreateOrUpdate with exponential backoff retry func (az *Cloud) CreateOrUpdateLB(ctx context.Context, service *v1.Service, lb armnetwork.LoadBalancer) error { + logger := log.Background().WithName("CreateOrUpdateLB") lb = cleanupSubnetInFrontendIPConfigurations(&lb) rgName := az.getLoadBalancerResourceGroup() _, err := az.NetworkClientFactory.GetLoadBalancerClient().CreateOrUpdate(ctx, rgName, ptr.Deref(lb.Name, ""), lb) - klog.V(10).Infof("LoadbalancerClient.CreateOrUpdate(%s): end", *lb.Name) + logger.V(10).Info("LoadbalancerClient.CreateOrUpdate: end", "loadBalancerName", *lb.Name) if err == nil { // Invalidate the cache right after updating _ = az.lbCache.Delete(*lb.Name) @@ -147,14 +151,14 @@ func (az *Cloud) CreateOrUpdateLB(ctx context.Context, service *v1.Service, lb a } // Invalidate the cache because ETAG precondition mismatch. if rerr.StatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", ptr.Deref(lb.Name, "")) + logger.V(3).Info("LoadBalancer cache is cleanup because of http.StatusPreconditionFailed", "loadBalancerName", ptr.Deref(lb.Name, "")) _ = az.lbCache.Delete(*lb.Name) } retryErrorMessage := rerr.Error() // Invalidate the cache because another new operation has canceled the current request. 
if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", ptr.Deref(lb.Name, "")) + logger.V(3).Info("LoadBalancer cache is cleanup because CreateOrUpdate is canceled by another operation", "loadBalancerName", ptr.Deref(lb.Name, "")) _ = az.lbCache.Delete(*lb.Name) } @@ -166,7 +170,7 @@ func (az *Cloud) CreateOrUpdateLB(ctx context.Context, service *v1.Service, lb a return rerr } pipRG, pipName := matches[1], matches[2] - klog.V(3).Infof("The public IP %s referenced by load balancer %s is not in Succeeded provisioning state, will try to update it", pipName, ptr.Deref(lb.Name, "")) + logger.V(3).Info("The public IP referenced by load balancer is not in Succeeded provisioning state, will try to update it", "pipName", pipName, "loadBalancerName", ptr.Deref(lb.Name, "")) pip, _, err := az.getPublicIPAddress(ctx, pipRG, pipName, azcache.CacheReadTypeDefault) if err != nil { klog.Errorf("Failed to get the public IP %s in resource group %s: %v", pipName, pipRG, err) @@ -187,7 +191,8 @@ func (az *Cloud) CreateOrUpdateLB(ctx context.Context, service *v1.Service, lb a } func (az *Cloud) CreateOrUpdateLBBackendPool(ctx context.Context, lbName string, backendPool *armnetwork.BackendAddressPool) error { - klog.V(4).Infof("CreateOrUpdateLBBackendPool: updating backend pool %s in LB %s", ptr.Deref(backendPool.Name, ""), lbName) + logger := log.Background().WithName("CreateOrUpdateLBBackendPool") + logger.V(4).Info("updating backend pool in LB", "backendPoolName", ptr.Deref(backendPool.Name, ""), "loadBalancerName", lbName) _, err := az.NetworkClientFactory.GetBackendAddressPoolClient().CreateOrUpdate(ctx, az.getLoadBalancerResourceGroup(), lbName, ptr.Deref(backendPool.Name, ""), *backendPool) if err == nil { // Invalidate the cache right after updating @@ -201,14 +206,14 @@ func (az *Cloud) CreateOrUpdateLBBackendPool(ctx context.Context, lbName string, // Invalidate the cache because ETAG precondition mismatch. if rerr.StatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", lbName) + logger.V(3).Info("LoadBalancer cache is cleanup because of http.StatusPreconditionFailed", "loadBalancerName", lbName) _ = az.lbCache.Delete(lbName) } retryErrorMessage := rerr.Error() // Invalidate the cache because another new operation has canceled the current request. 
if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", lbName) + logger.V(3).Info("LoadBalancer cache is cleanup because CreateOrUpdate is canceled by another operation", "loadBalancerName", lbName) _ = az.lbCache.Delete(lbName) } @@ -216,7 +221,8 @@ func (az *Cloud) CreateOrUpdateLBBackendPool(ctx context.Context, lbName string, } func (az *Cloud) DeleteLBBackendPool(ctx context.Context, lbName, backendPoolName string) error { - klog.V(4).Infof("DeleteLBBackendPool: deleting backend pool %s in LB %s", backendPoolName, lbName) + logger := log.Background().WithName("DeleteLBBackendPool") + logger.V(4).Info("deleting backend pool in LB", "backendPoolName", backendPoolName, "loadBalancerName", lbName) err := az.NetworkClientFactory.GetBackendAddressPoolClient().Delete(ctx, az.getLoadBalancerResourceGroup(), lbName, backendPoolName) if err == nil { // Invalidate the cache right after updating @@ -230,14 +236,14 @@ func (az *Cloud) DeleteLBBackendPool(ctx context.Context, lbName, backendPoolNam } // Invalidate the cache because ETAG precondition mismatch. if rerr.StatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because of http.StatusPreconditionFailed", lbName) + logger.V(3).Info("LoadBalancer cache is cleanup because of http.StatusPreconditionFailed", "loadBalancerName", lbName) _ = az.lbCache.Delete(lbName) } retryErrorMessage := rerr.Error() // Invalidate the cache because another new operation has canceled the current request. if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("LoadBalancer cache for %s is cleanup because CreateOrUpdate is canceled by another operation", lbName) + logger.V(3).Info("LoadBalancer cache is cleanup because CreateOrUpdate is canceled by another operation", "loadBalancerName", lbName) _ = az.lbCache.Delete(lbName) } @@ -278,6 +284,7 @@ func (az *Cloud) MigrateToIPBasedBackendPoolAndWaitForCompletion( ctx context.Context, lbName string, backendPoolNames []string, nicsCountMap map[string]int, ) error { + logger := log.Background().WithName("MigrateToIPBasedBackendPoolAndWaitForCompletion") if _, rerr := az.NetworkClientFactory.GetLoadBalancerClient().MigrateToIPBased(ctx, az.ResourceGroup, lbName, &armnetwork.LoadBalancersClientMigrateToIPBasedOptions{ Parameters: &armnetwork.MigrateLoadBalancerToIPBasedRequest{ Pools: to.SliceOfPtrs(backendPoolNames...), @@ -306,7 +313,7 @@ func (az *Cloud) MigrateToIPBasedBackendPoolAndWaitForCompletion( } if countIPsOnBackendPool(bp) != nicsCount { - klog.V(4).Infof("MigrateToIPBasedBackendPoolAndWaitForCompletion: Expected IPs %d, current IPs %d, will retry in 5s", nicsCount, countIPsOnBackendPool(bp)) + logger.V(4).Info("Expected IPs vs current IPs, will retry in 5s", "expectedIPs", nicsCount, "currentIPs", countIPsOnBackendPool(bp)) return false, nil } succeeded[bpName] = true @@ -327,6 +334,7 @@ func (az *Cloud) MigrateToIPBasedBackendPoolAndWaitForCompletion( } func (az *Cloud) newLBCache() (azcache.Resource, error) { + logger := log.Background().WithName("newLBCache") getter := func(ctx context.Context, key string) (interface{}, error) { lb, err := az.NetworkClientFactory.GetLoadBalancerClient().Get(ctx, az.getLoadBalancerResourceGroup(), key, nil) exists, rerr := checkResourceExistsFromError(err) @@ -335,7 +343,7 @@ func (az *Cloud) newLBCache() 
(azcache.Resource, error) { } if !exists { - klog.V(2).Infof("Load balancer %q not found", key) + logger.V(2).Info("Load balancer not found", "loadBalancerName", key) return nil, nil } @@ -417,6 +425,7 @@ func isNICPool(bp *armnetwork.BackendAddressPool) bool { func (az *Cloud) cleanupBasicLoadBalancer( ctx context.Context, clusterName string, service *v1.Service, existingLBs []*armnetwork.LoadBalancer, ) ([]*armnetwork.LoadBalancer, error) { + logger := log.Background().WithName("cleanupBasicLoadBalancer") if !az.UseStandardLoadBalancer() { return existingLBs, nil } @@ -425,7 +434,7 @@ func (az *Cloud) cleanupBasicLoadBalancer( for i := len(existingLBs) - 1; i >= 0; i-- { lb := existingLBs[i] if lb != nil && lb.SKU != nil && lb.SKU.Name != nil && *lb.SKU.Name == armnetwork.LoadBalancerSKUNameBasic { - klog.V(2).Infof("cleanupBasicLoadBalancer: found basic load balancer %q, removing it", *lb.Name) + logger.V(2).Info("found basic load balancer, removing it", "loadBalancerName", *lb.Name) if err := az.safeDeleteLoadBalancer(ctx, *lb, clusterName, service); err != nil { klog.ErrorS(err, "cleanupBasicLoadBalancer: failed to delete outdated basic load balancer", "loadBalancerName", *lb.Name) return nil, err diff --git a/pkg/provider/azure_local_services.go b/pkg/provider/azure_local_services.go index 3c76539241..007e725fb0 100644 --- a/pkg/provider/azure_local_services.go +++ b/pkg/provider/azure_local_services.go @@ -90,13 +90,13 @@ func newLoadBalancerBackendPoolUpdater(az *Cloud, interval time.Duration) *loadB // run starts the loadBalancerBackendPoolUpdater, and stops if the context exits. func (updater *loadBalancerBackendPoolUpdater) run(ctx context.Context) { - logger := log.Background().WithName("run") - klog.V(2).Info("loadBalancerBackendPoolUpdater.run: started") + logger := log.Background().WithName("loadBalancerBackendPoolUpdater.run") + logger.V(2).Info("started") err := wait.PollUntilContextCancel(ctx, updater.interval, false, func(ctx context.Context) (bool, error) { updater.process(ctx) return false, nil }) - logger.Error(err, "loadBalancerBackendPoolUpdater.run: stopped") + logger.Error(err, "stopped") } // getAddIPsToBackendPoolOperation creates a new loadBalancerBackendPoolUpdateOperation @@ -125,11 +125,12 @@ func getRemoveIPsFromBackendPoolOperation(serviceName, loadBalancerName, backend // addOperation adds an operation to the loadBalancerBackendPoolUpdater. func (updater *loadBalancerBackendPoolUpdater) addOperation(operation batchOperation) batchOperation { + logger := log.Background().WithName("loadBalancerBackendPoolUpdater.addOperation") updater.lock.Lock() defer updater.lock.Unlock() op := operation.(*loadBalancerBackendPoolUpdateOperation) - klog.V(4).InfoS("loadBalancerBackendPoolUpdater.addOperation", + logger.V(4).Info("", "kind", op.kind, "service name", op.serviceName, "load balancer name", op.loadBalancerName, @@ -141,13 +142,14 @@ func (updater *loadBalancerBackendPoolUpdater) addOperation(operation batchOpera // removeOperation removes all operations targeting to the specified service. 
func (updater *loadBalancerBackendPoolUpdater) removeOperation(serviceName string) { + logger := log.Background().WithName("loadBalancerBackendPoolUpdater.removeOperation") updater.lock.Lock() defer updater.lock.Unlock() for i := len(updater.operations) - 1; i >= 0; i-- { op := updater.operations[i].(*loadBalancerBackendPoolUpdateOperation) if strings.EqualFold(op.serviceName, serviceName) { - klog.V(4).InfoS("loadBalancerBackendPoolUpdater.removeOperation", + logger.V(4).Info("", "kind", op.kind, "service name", op.serviceName, "load balancer name", op.loadBalancerName, @@ -164,11 +166,12 @@ func (updater *loadBalancerBackendPoolUpdater) removeOperation(serviceName strin // if it is retriable, otherwise all operations in the batch targeting to // this backend pool will fail. func (updater *loadBalancerBackendPoolUpdater) process(ctx context.Context) { + logger := log.Background().WithName("loadBalancerBackendPoolUpdater.process") updater.lock.Lock() defer updater.lock.Unlock() if len(updater.operations) == 0 { - klog.V(4).Infof("loadBalancerBackendPoolUpdater.process: no operations to process") + logger.V(4).Info("no operations to process") return } @@ -178,11 +181,11 @@ func (updater *loadBalancerBackendPoolUpdater) process(ctx context.Context) { lbOp := op.(*loadBalancerBackendPoolUpdateOperation) si, found := updater.az.getLocalServiceInfo(strings.ToLower(lbOp.serviceName)) if !found { - klog.V(4).Infof("loadBalancerBackendPoolUpdater.process: service %s is not a local service, skip the operation", lbOp.serviceName) + logger.V(4).Info("service is not a local service, skip the operation", "service", lbOp.serviceName) continue } if !strings.EqualFold(si.lbName, lbOp.loadBalancerName) { - klog.V(4).InfoS("loadBalancerBackendPoolUpdater.process: service is not associated with the load balancer, skip the operation", + logger.V(4).Info("service is not associated with the load balancer, skip the operation", "service", lbOp.serviceName, "previous load balancer", lbOp.loadBalancerName, "current load balancer", si.lbName) @@ -236,7 +239,7 @@ func (updater *loadBalancerBackendPoolUpdater) process(ctx context.Context) { // To keep the code clean, ignore the case when `changed` is true // but the backend pool object is not changed after multiple times of removal and re-adding. if changed { - klog.V(2).Infof("loadBalancerBackendPoolUpdater.process: updating backend pool %s/%s", lbName, poolName) + logger.V(2).Info("updating backend pool", "loadBalancer", lbName, "backendPool", poolName) _, err = updater.az.NetworkClientFactory.GetBackendAddressPoolClient().CreateOrUpdate(ctx, updater.az.ResourceGroup, lbName, poolName, *bp) if err != nil { updater.processError(err, operationName, ops...) @@ -255,8 +258,9 @@ func (updater *loadBalancerBackendPoolUpdater) processError( operationName string, operations ...batchOperation, ) { + logger := log.Background().WithName("loadBalancerBackendPoolUpdater.processError") if exists, err := errutils.CheckResourceExistsFromAzcoreError(rerr); !exists && err == nil { - klog.V(4).Infof("backend pool not found for operation %s, skip updating", operationName) + logger.V(4).Info("backend pool not found for operation, skip updating", "operation", operationName) return } @@ -300,6 +304,7 @@ func (az *Cloud) getLocalServiceInfo(serviceName string) (*serviceInfo, bool) { // setUpEndpointSlicesInformer creates an informer for EndpointSlices of local services. // It watches the update events and send backend pool update operations to the batch updater. 
func (az *Cloud) setUpEndpointSlicesInformer(informerFactory informers.SharedInformerFactory) { + logger := log.Background().WithName("setUpEndpointSlicesInformer") endpointSlicesInformer := informerFactory.Discovery().V1().EndpointSlices().Informer() _, _ = endpointSlicesInformer.AddEventHandler( cache.ResourceEventHandlerFuncs{ @@ -313,17 +318,17 @@ func (az *Cloud) setUpEndpointSlicesInformer(informerFactory informers.SharedInf svcName := getServiceNameOfEndpointSlice(newES) if svcName == "" { - klog.V(4).Infof("EndpointSlice %s/%s does not have service name label, skip updating load balancer backend pool", newES.Namespace, newES.Name) + logger.V(4).Info("EndpointSlice does not have service name label, skip updating load balancer backend pool", "namespace", newES.Namespace, "name", newES.Name) return } - klog.V(4).Infof("Detecting EndpointSlice %s/%s update", newES.Namespace, newES.Name) + logger.V(4).Info("Detecting EndpointSlice update", "namespace", newES.Namespace, "name", newES.Name) az.endpointSlicesCache.Store(strings.ToLower(fmt.Sprintf("%s/%s", newES.Namespace, newES.Name)), newES) key := strings.ToLower(fmt.Sprintf("%s/%s", newES.Namespace, svcName)) si, found := az.getLocalServiceInfo(key) if !found { - klog.V(4).Infof("EndpointSlice %s/%s belongs to service %s, but the service is not a local service, or has not finished the initial reconciliation loop. Skip updating load balancer backend pool", newES.Namespace, newES.Name, key) + logger.V(4).Info("EndpointSlice belongs to service, but the service is not a local service, or has not finished the initial reconciliation loop. Skip updating load balancer backend pool", "namespace", newES.Namespace, "name", newES.Name, "service", key) return } lbName, ipFamily := si.lbName, si.ipFamily @@ -493,6 +498,7 @@ func newServiceInfo(ipFamily, lbName string) *serviceInfo { // getLocalServiceEndpointsNodeNames gets the node names that host all endpoints of the local service. func (az *Cloud) getLocalServiceEndpointsNodeNames(service *v1.Service) *utilsets.IgnoreCaseSet { + logger := log.Background().WithName("getLocalServiceEndpointsNodeNames") var eps []*discovery_v1.EndpointSlice az.endpointSlicesCache.Range(func(_, value interface{}) bool { endpointSlice := value.(*discovery_v1.EndpointSlice) @@ -510,7 +516,7 @@ func (az *Cloud) getLocalServiceEndpointsNodeNames(service *v1.Service) *utilset var nodeNames []string for _, ep := range eps { for _, endpoint := range ep.Endpoints { - klog.V(4).Infof("EndpointSlice %s/%s has endpoint %s on node %s", ep.Namespace, ep.Name, endpoint.Addresses, ptr.Deref(endpoint.NodeName, "")) + logger.V(4).Info("EndpointSlice has endpoint on node", "namespace", ep.Namespace, "name", ep.Name, "addresses", endpoint.Addresses, "nodeName", ptr.Deref(endpoint.NodeName, "")) nodeNames = append(nodeNames, ptr.Deref(endpoint.NodeName, "")) } } @@ -527,6 +533,7 @@ func (az *Cloud) cleanupLocalServiceBackendPool( lbs []*armnetwork.LoadBalancer, clusterName string, ) (newLBs []*armnetwork.LoadBalancer, err error) { + logger := log.Background().WithName("cleanupLocalServiceBackendPool") var changed bool for _, lb := range lbs { lbName := ptr.Deref(lb.Name, "") @@ -545,7 +552,7 @@ func (az *Cloud) cleanupLocalServiceBackendPool( if changed { // Refresh the list of existing LBs after cleanup to update etags for the LBs. 
- klog.V(4).Info("Refreshing the list of existing LBs") + logger.V(4).Info("Refreshing the list of existing LBs") lbs, err = az.ListManagedLBs(ctx, svc, nodes, clusterName) if err != nil { return nil, fmt.Errorf("reconcileLoadBalancer: failed to list managed LB: %w", err) } @@ -619,10 +626,11 @@ func (az *Cloud) reconcileIPsInLocalServiceBackendPoolsAsync( currentIPsInBackendPools map[string][]string, expectedIPs []string, ) { + logger := log.Background().WithName("reconcileIPsInLocalServiceBackendPoolsAsync") for bpName, currentIPs := range currentIPsInBackendPools { ipsToBeDeleted := compareNodeIPs(currentIPs, expectedIPs) if len(ipsToBeDeleted) == 0 && len(currentIPs) == len(expectedIPs) { - klog.V(4).Infof("No IP change detected for service %s, skip updating load balancer backend pool", serviceName) + logger.V(4).Info("No IP change detected for service, skip updating load balancer backend pool", "service", serviceName) return } if len(ipsToBeDeleted) > 0 { diff --git a/pkg/provider/azure_privatelinkservice.go b/pkg/provider/azure_privatelinkservice.go index 45522aa889..b251a6a02c 100644 --- a/pkg/provider/azure_privatelinkservice.go +++ b/pkg/provider/azure_privatelinkservice.go @@ -34,6 +34,7 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" fnutil "sigs.k8s.io/cloud-provider-azure/pkg/util/collectionutil" ) @@ -47,6 +48,7 @@ func (az *Cloud) reconcilePrivateLinkService( fipConfig *armnetwork.FrontendIPConfiguration, wantPLS bool, ) (bool /*deleted PLS*/, error) { + logger := log.Background().WithName("reconcilePrivateLinkService") isinternal := requiresInternalLoadBalancer(service) _, _, fipIPVersion := az.serviceOwnsFrontendIP(ctx, fipConfig, service) serviceName := getServiceName(service) @@ -64,14 +66,18 @@ func (az *Cloud) reconcilePrivateLinkService( isDualStack := isServiceDualStack(service) if isIPv6 { if isDualStack || !createPLS { - klog.V(2).Infof("IPv6 is not supported for private link service, skip reconcilePrivateLinkService for service(%s)", serviceName) + logger.V(2).Info("IPv6 is not supported for private link service, skipping reconcilePrivateLinkService", "service", serviceName) return false, nil } return false, fmt.Errorf("IPv6 is not supported for private link service") } fipConfigID := fipConfig.ID - klog.V(2).Infof("reconcilePrivateLinkService for service(%s) - LB fipConfigID(%s) - wantPLS(%t) - createPLS(%t)", serviceName, ptr.Deref(fipConfig.Name, ""), wantPLS, createPLS) + logger.V(2).Info("reconciling private link service", + "service", serviceName, + "fipConfigName", ptr.Deref(fipConfig.Name, ""), + "wantPLS", wantPLS, + "createPLS", createPLS) request := "ensure_privatelinkservice" if !wantPLS { @@ -99,7 +105,7 @@ func (az *Cloud) reconcilePrivateLinkService( exists := !strings.EqualFold(ptr.Deref(existingPLS.ID, ""), consts.PrivateLinkServiceNotExistID) if exists { - klog.V(4).Infof("reconcilePrivateLinkService for service(%s): found existing private link service attached(%s)", serviceName, ptr.Deref(existingPLS.Name, "")) + logger.V(4).Info("found existing private link service attached", "service", serviceName, "privateLinkService", ptr.Deref(existingPLS.Name, "")) if !isManagedPrivateLinkSerivce(existingPLS, clusterName) { return false, fmt.Errorf( "reconcilePrivateLinkService for service(%s) failed: LB frontend(%s) already has unmanaged private link service(%s)", @@ -120,11 +126,10 @@ func (az *Cloud)
reconcilePrivateLinkService( ownerService, ) } - klog.V(2).Infof( - "reconcilePrivateLinkService for service(%s): automatically share private link service(%s) owned by service(%s)", - serviceName, - ptr.Deref(existingPLS.Name, ""), - ownerService, + logger.V(2).Info("automatically share private link service owned by another service", + "service", serviceName, + "privateLinkService", ptr.Deref(existingPLS.Name, ""), + "ownerService", ownerService, ) return false, nil } @@ -151,7 +156,7 @@ func (az *Cloud) reconcilePrivateLinkService( } if dirtyPLS { - klog.V(2).Infof("reconcilePrivateLinkService for service(%s): pls(%s) - updating", serviceName, plsName) + logger.V(2).Info("updating", "service", serviceName, "pls", plsName) err := az.disablePLSNetworkPolicy(ctx, service) if err != nil { klog.Errorf("reconcilePrivateLinkService for service(%s) disable PLS network policy failed for pls(%s): %v", serviceName, plsName, err.Error()) @@ -179,13 +184,13 @@ func (az *Cloud) reconcilePrivateLinkService( return false, deleteErr } isOperationSucceeded = true - klog.V(2).Infof("reconcilePrivateLinkService for service(%s) finished", serviceName) + logger.V(2).Info("finished", "service", serviceName) return true, nil // return true for successfully deleted PLS } } isOperationSucceeded = true - klog.V(2).Infof("reconcilePrivateLinkService for service(%s) finished", serviceName) + logger.V(2).Info("finished", "service", serviceName) return false, nil } @@ -240,13 +245,14 @@ func (az *Cloud) disablePLSNetworkPolicy(ctx context.Context, service *v1.Servic } func (az *Cloud) safeDeletePLS(ctx context.Context, pls *armnetwork.PrivateLinkService, service *v1.Service) error { + logger := log.Background().WithName("safeDeletePLS") if pls == nil { return nil } peConns := pls.Properties.PrivateEndpointConnections for _, peConn := range peConns { - klog.V(2).Infof("deletePLS: deleting PEConnection %s", ptr.Deref(peConn.Name, "")) + logger.V(2).Info("deleting PEConnection", "PEConnection", ptr.Deref(peConn.Name, "")) err := az.plsRepo.DeletePEConnection(ctx, az.getPLSResourceGroup(service), ptr.Deref(pls.Name, ""), ptr.Deref(peConn.Name, "")) if err != nil { return err @@ -260,7 +266,7 @@ func (az *Cloud) safeDeletePLS(ctx context.Context, pls *armnetwork.PrivateLinkS if rerr != nil { return rerr } - klog.V(2).Infof("safeDeletePLS(%s) finished", ptr.Deref(pls.Name, "")) + logger.V(2).Info("finished", "privateLinkService", ptr.Deref(pls.Name, "")) return nil } @@ -367,6 +373,7 @@ func (az *Cloud) reconcilePLSIpConfigs( existingPLS *armnetwork.PrivateLinkService, service *v1.Service, ) (bool, error) { + logger := log.Background().WithName("reconcilePLSIpConfigs") changed := false serviceName := getServiceName(service) @@ -418,7 +425,7 @@ func (az *Cloud) reconcilePLSIpConfigs( changed = true } if *ipConfig.Properties.PrivateIPAllocationMethod == armnetwork.IPAllocationMethodStatic { - klog.V(10).Infof("Found static IP: %s", ptr.Deref(ipConfig.Properties.PrivateIPAddress, "")) + logger.V(10).Info("Found static IP", "ip", ptr.Deref(ipConfig.Properties.PrivateIPAddress, "")) if _, found := staticIps[ptr.Deref(ipConfig.Properties.PrivateIPAddress, "")]; !found { changed = true } diff --git a/pkg/provider/azure_publicip_repo.go b/pkg/provider/azure_publicip_repo.go index f39553270a..c7c8ad4878 100644 --- a/pkg/provider/azure_publicip_repo.go +++ b/pkg/provider/azure_publicip_repo.go @@ -40,11 +40,12 @@ import ( // CreateOrUpdatePIP invokes az.NetworkClientFactory.GetPublicIPAddressClient().CreateOrUpdate with exponential 
backoff retry func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string, pip *armnetwork.PublicIPAddress) error { + logger := klog.Background().WithName("CreateOrUpdatePIP") ctx, cancel := getContextWithCancel() defer cancel() _, rerr := az.NetworkClientFactory.GetPublicIPAddressClient().CreateOrUpdate(ctx, pipResourceGroup, ptr.Deref(pip.Name, ""), *pip) - klog.V(10).Infof("NetworkClientFactory.GetPublicIPAddressClient().CreateOrUpdate(%s, %s): end", pipResourceGroup, ptr.Deref(pip.Name, "")) + logger.V(10).Info("NetworkClientFactory.GetPublicIPAddressClient().CreateOrUpdate end", "pipResourceGroup", pipResourceGroup, "pipName", ptr.Deref(pip.Name, "")) if rerr == nil { // Invalidate the cache right after updating _ = az.pipCache.Delete(pipResourceGroup) @@ -59,7 +60,7 @@ func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string, var respError *azcore.ResponseError if errors.As(rerr, &respError) && respError != nil { if respError.StatusCode == http.StatusPreconditionFailed { - klog.V(3).Infof("PublicIP cache for (%s, %s) is cleanup because of http.StatusPreconditionFailed", pipResourceGroup, ptr.Deref(pip.Name, "")) + logger.V(3).Info("PublicIP cache is cleaned up because of http.StatusPreconditionFailed", "pipResourceGroup", pipResourceGroup, "pipName", ptr.Deref(pip.Name, "")) _ = az.pipCache.Delete(pipResourceGroup) } } @@ -67,7 +68,7 @@ func (az *Cloud) CreateOrUpdatePIP(service *v1.Service, pipResourceGroup string, retryErrorMessage := rerr.Error() // Invalidate the cache because another new operation has canceled the current request. if strings.Contains(strings.ToLower(retryErrorMessage), consts.OperationCanceledErrorMessage) { - klog.V(3).Infof("PublicIP cache for (%s, %s) is cleanup because CreateOrUpdate is canceled by another operation", pipResourceGroup, ptr.Deref(pip.Name, "")) + logger.V(3).Info("PublicIP cache is cleaned up because CreateOrUpdate is canceled by another operation", "pipResourceGroup", pipResourceGroup, "pipName", ptr.Deref(pip.Name, "")) _ = az.pipCache.Delete(pipResourceGroup) } diff --git a/pkg/provider/azure_routes.go b/pkg/provider/azure_routes.go index c4e0962ad6..f8ed41c0ee 100644 --- a/pkg/provider/azure_routes.go +++ b/pkg/provider/azure_routes.go @@ -100,12 +100,13 @@ func (d *delayedRouteUpdater) run(ctx context.Context) { // updateRoutes invokes route table client to update all routes. func (d *delayedRouteUpdater) updateRoutes(ctx context.Context) { + logger := log.Background().WithName("updateRoutes") d.lock.Lock() defer d.lock.Unlock() // No need to do any updating. if len(d.routesToUpdate) == 0 { - klog.V(4).Info("updateRoutes: nothing to update, returning") + logger.V(4).Info("nothing to update, returning") return } @@ -191,7 +192,7 @@ func (d *delayedRouteUpdater) updateRoutes(ctx context.Context) { if rt.operation == routeOperationDelete { routes = append(routes[:i], routes[i+1:]...)
dirty = true - klog.V(2).Infof("updateRoutes: found outdated route %s targeting node %s, removing it", ptr.Deref(rt.route.Name, ""), rt.nodeName) + logger.V(2).Info("found outdated route targeting node, removing it", "route", ptr.Deref(rt.route.Name, ""), "node", rt.nodeName) } } } @@ -211,7 +212,7 @@ func (d *delayedRouteUpdater) updateRoutes(ctx context.Context) { if dirty { if !onlyUpdateTags { - klog.V(2).Infof("updateRoutes: updating routes") + logger.V(2).Info("updating routes") routeTable.Properties.Routes = routes } _, err := d.az.routeTableRepo.CreateOrUpdate(ctx, *routeTable) @@ -228,20 +229,21 @@ func (d *delayedRouteUpdater) updateRoutes(ctx context.Context) { // cleanupOutdatedRoutes deletes all non-dualstack routes when dualstack is enabled, // and deletes all dualstack routes when dualstack is not enabled. func (d *delayedRouteUpdater) cleanupOutdatedRoutes(existingRoutes []*armnetwork.Route) (routes []*armnetwork.Route, changed bool) { + logger := log.Background().WithName("cleanupOutdatedRoutes") for i := len(existingRoutes) - 1; i >= 0; i-- { existingRouteName := ptr.Deref(existingRoutes[i].Name, "") split := strings.Split(existingRouteName, consts.RouteNameSeparator) - klog.V(4).Infof("cleanupOutdatedRoutes: checking route %s", existingRouteName) + logger.V(4).Info("checking route", "route", existingRouteName) // filter out unmanaged routes deleteRoute := false if d.az.nodeNames.Has(split[0]) { if d.az.ipv6DualStackEnabled && len(split) == 1 { - klog.V(2).Infof("cleanupOutdatedRoutes: deleting outdated non-dualstack route %s", existingRouteName) + logger.V(2).Info("deleting outdated non-dualstack route", "route", existingRouteName) deleteRoute = true } else if !d.az.ipv6DualStackEnabled && len(split) == 2 { - klog.V(2).Infof("cleanupOutdatedRoutes: deleting outdated dualstack route %s", existingRouteName) + logger.V(2).Info("deleting outdated dualstack route", "route", existingRouteName) deleteRoute = true } @@ -295,7 +297,8 @@ func (d *delayedRouteUpdater) removeOperation(_ string) {} // ListRoutes lists all managed routes that belong to the specified clusterName // implements cloudprovider.Routes.ListRoutes func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudprovider.Route, error) { - klog.V(10).Infof("ListRoutes: START clusterName=%q", clusterName) + logger := log.Background().WithName("ListRoutes") + logger.V(10).Info("START", "clusterName", clusterName) routeTable, err := az.routeTableRepo.Get(ctx, az.RouteTableName, azcache.CacheReadTypeDefault) routes, err := processRoutes(az.ipv6DualStackEnabled, routeTable, err) if err != nil { @@ -322,7 +325,7 @@ func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpr // ensure the route table is tagged as configured tags, changed := az.ensureRouteTableTagged(routeTable) if changed { - klog.V(2).Infof("ListRoutes: updating tags on route table %s", ptr.Deref(routeTable.Name, "")) + logger.V(2).Info("updating tags on route table", "routeTableName", ptr.Deref(routeTable.Name, "")) op := az.routeUpdater.addOperation(getUpdateRouteTableTagsOperation(tags)) // Wait for operation complete. 
@@ -338,6 +341,7 @@ func (az *Cloud) ListRoutes(ctx context.Context, clusterName string) ([]*cloudpr // Injectable for testing func processRoutes(ipv6DualStackEnabled bool, routeTable *armnetwork.RouteTable, err error) ([]*cloudprovider.Route, error) { + logger := log.Background().WithName("processRoutes") if err != nil { return nil, err } @@ -351,7 +355,7 @@ func processRoutes(ipv6DualStackEnabled bool, routeTable *armnetwork.RouteTable, for i, route := range routeTable.Properties.Routes { instance := MapRouteNameToNodeName(ipv6DualStackEnabled, *route.Name) cidr := *route.Properties.AddressPrefix - klog.V(10).Infof("ListRoutes: * instance=%q, cidr=%q", instance, cidr) + logger.V(10).Info("processing route", "instance", instance, "cidr", cidr) kubeRoutes[i] = &cloudprovider.Route{ Name: *route.Name, @@ -361,18 +365,19 @@ func processRoutes(ipv6DualStackEnabled bool, routeTable *armnetwork.RouteTable, } } - klog.V(10).Info("ListRoutes: FINISH") + logger.V(10).Info("finished processing routes") return kubeRoutes, nil } func (az *Cloud) createRouteTable(ctx context.Context) error { + logger := log.Background().WithName("createRouteTable") routeTable := armnetwork.RouteTable{ Name: ptr.To(az.RouteTableName), Location: ptr.To(az.Location), Properties: &armnetwork.RouteTablePropertiesFormat{}, } - klog.V(3).Infof("createRouteTableIfNotExists: creating routetable. routeTableName=%q", az.RouteTableName) + logger.V(3).Info("creating route table if it does not exist", "routeTableName", az.RouteTableName) _, err := az.routeTableRepo.CreateOrUpdate(ctx, routeTable) return err } @@ -382,6 +387,7 @@ func (az *Cloud) createRouteTable(ctx context.Context) error { // to create a more user-meaningful name. // implements cloudprovider.Routes.CreateRoute func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, _ string, kubeRoute *cloudprovider.Route) error { + logger := log.Background().WithName("CreateRoute") mc := metrics.NewMetricContext("routes", "create_route", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), string(kubeRoute.TargetNode)) isOperationSucceeded := false defer func() { @@ -396,7 +402,7 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, _ string, return err } if unmanaged { - klog.V(2).Infof("CreateRoute: omitting unmanaged node %q", kubeRoute.TargetNode) + logger.V(2).Info("omitting unmanaged node", "node", kubeRoute.TargetNode) az.routeCIDRsLock.Lock() defer az.routeCIDRsLock.Unlock() az.routeCIDRs[nodeName] = kubeRoute.DestinationCIDR @@ -415,16 +421,16 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, _ string, } else { // for dual stack and single stack IPv6 we need to select // a private ip that matches family of the cidr - klog.V(4).Infof("CreateRoute: create route instance=%q cidr=%q is in dual stack mode", kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + logger.V(4).Info("create route instance in dual stack mode", "instance", kubeRoute.TargetNode, "cidr", kubeRoute.DestinationCIDR) nodePrivateIPs, err := az.getPrivateIPsForMachine(ctx, kubeRoute.TargetNode) if nil != err { - klog.V(3).Infof("CreateRoute: create route: failed(GetPrivateIPsByNodeName) instance=%q cidr=%q with error=%v", kubeRoute.TargetNode, kubeRoute.DestinationCIDR, err) + logger.V(3).Error(err, "create route: failed(GetPrivateIPsByNodeName)", "instance", kubeRoute.TargetNode, "cidr", kubeRoute.DestinationCIDR) return err } targetIP, err = findFirstIPByFamily(nodePrivateIPs, CIDRv6) if nil != err { - klog.V(3).Infof("CreateRoute: create route:
failed(findFirstIpByFamily) instance=%q cidr=%q with error=%v", kubeRoute.TargetNode, kubeRoute.DestinationCIDR, err) + logger.V(3).Error(err, "create route: failed(findFirstIpByFamily)", "instance", kubeRoute.TargetNode, "cidr", kubeRoute.DestinationCIDR) return err } } @@ -438,7 +444,7 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, _ string, }, } - klog.V(2).Infof("CreateRoute: creating route for clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + logger.V(2).Info("creating route", "clusterName", clusterName, "instance", kubeRoute.TargetNode, "cidr", kubeRoute.DestinationCIDR) op := az.routeUpdater.addOperation(getAddRouteOperation(route, string(kubeRoute.TargetNode))) // Wait for operation complete. @@ -448,7 +454,7 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, _ string, return err } - klog.V(2).Infof("CreateRoute: route created. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + logger.V(2).Info("route created", "clusterName", clusterName, "instance", kubeRoute.TargetNode, "cidr", kubeRoute.DestinationCIDR) isOperationSucceeded = true return nil @@ -458,6 +464,7 @@ func (az *Cloud) CreateRoute(ctx context.Context, clusterName string, _ string, // Route should be as returned by ListRoutes // implements cloudprovider.Routes.DeleteRoute func (az *Cloud) DeleteRoute(_ context.Context, clusterName string, kubeRoute *cloudprovider.Route) error { + logger := log.Background().WithName("DeleteRoute") mc := metrics.NewMetricContext("routes", "delete_route", az.ResourceGroup, az.getNetworkResourceSubscriptionID(), string(kubeRoute.TargetNode)) isOperationSucceeded := false defer func() { @@ -471,7 +478,7 @@ func (az *Cloud) DeleteRoute(_ context.Context, clusterName string, kubeRoute *c return err } if unmanaged { - klog.V(2).Infof("DeleteRoute: omitting unmanaged node %q", kubeRoute.TargetNode) + logger.V(2).Info("omitting unmanaged node", "node", kubeRoute.TargetNode) az.routeCIDRsLock.Lock() defer az.routeCIDRsLock.Unlock() delete(az.routeCIDRs, nodeName) @@ -479,7 +486,7 @@ func (az *Cloud) DeleteRoute(_ context.Context, clusterName string, kubeRoute *c } routeName := mapNodeNameToRouteName(az.ipv6DualStackEnabled, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) - klog.V(2).Infof("DeleteRoute: deleting route. clusterName=%q instance=%q cidr=%q routeName=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR, routeName) + logger.V(2).Info("deleting route", "clusterName", clusterName, "instance", kubeRoute.TargetNode, "cidr", kubeRoute.DestinationCIDR, "routeName", routeName) route := &armnetwork.Route{ Name: ptr.To(routeName), Properties: &armnetwork.RoutePropertiesFormat{}, @@ -496,7 +503,7 @@ func (az *Cloud) DeleteRoute(_ context.Context, clusterName string, kubeRoute *c // Remove outdated ipv4 routes as well if az.ipv6DualStackEnabled { routeNameWithoutIPV6Suffix := strings.Split(routeName, consts.RouteNameSeparator)[0] - klog.V(2).Infof("DeleteRoute: deleting route. 
clusterName=%q instance=%q cidr=%q routeName=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR, routeNameWithoutIPV6Suffix) + logger.V(2).Info("deleting route", "clusterName", clusterName, "instance", kubeRoute.TargetNode, "cidr", kubeRoute.DestinationCIDR, "routeName", routeNameWithoutIPV6Suffix) route := &armnetwork.Route{ Name: ptr.To(routeNameWithoutIPV6Suffix), Properties: &armnetwork.RoutePropertiesFormat{}, @@ -511,7 +518,7 @@ func (az *Cloud) DeleteRoute(_ context.Context, clusterName string, kubeRoute *c } } - klog.V(2).Infof("DeleteRoute: route deleted. clusterName=%q instance=%q cidr=%q", clusterName, kubeRoute.TargetNode, kubeRoute.DestinationCIDR) + logger.V(2).Info("route deleted", "clusterName", clusterName, "instance", kubeRoute.TargetNode, "cidr", kubeRoute.DestinationCIDR) isOperationSucceeded = true return nil diff --git a/pkg/provider/azure_standard.go b/pkg/provider/azure_standard.go index 10a02d1de2..a4388b9295 100644 --- a/pkg/provider/azure_standard.go +++ b/pkg/provider/azure_standard.go @@ -42,6 +42,7 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" vmutil "sigs.k8s.io/cloud-provider-azure/pkg/util/vm" ) @@ -331,6 +332,7 @@ func (az *Cloud) getRulePrefix(service *v1.Service) string { } func (az *Cloud) getPublicIPName(clusterName string, service *v1.Service, isIPv6 bool) (string, error) { + logger := log.Background().WithName("getPublicIPName") isDualStack := isServiceDualStack(service) pipName := fmt.Sprintf("%s-%s", clusterName, az.GetLoadBalancerName(context.TODO(), clusterName, service)) if id := getServicePIPPrefixID(service, isIPv6); id != "" { @@ -344,12 +346,13 @@ func (az *Cloud) getPublicIPName(clusterName string, service *v1.Service, isIPv6 maxLength := consts.PIPPrefixNameMaxLength - consts.IPFamilySuffixLength if len(pipName) > maxLength { pipNameSegment = pipNameSegment[:maxLength] - klog.V(6).Infof("original PIP name is lengthy %q, truncate it to %q", pipName, pipNameSegment) + logger.V(6).Info("original PIP name is lengthy, truncate it", "originalName", pipName, "truncatedName", pipNameSegment) } return getResourceByIPFamily(pipNameSegment, isDualStack, isIPv6), nil } func publicIPOwnsFrontendIP(service *v1.Service, fip *armnetwork.FrontendIPConfiguration, pip *armnetwork.PublicIPAddress) bool { + logger := log.Background().WithName("publicIPOwnsFrontendIP") if pip != nil && pip.ID != nil && pip.Properties != nil && @@ -358,7 +361,7 @@ func publicIPOwnsFrontendIP(service *v1.Service, fip *armnetwork.FrontendIPConfi fip.Properties != nil && fip.Properties.PublicIPAddress != nil { if strings.EqualFold(ptr.Deref(pip.ID, ""), ptr.Deref(fip.Properties.PublicIPAddress.ID, "")) { - klog.V(6).Infof("publicIPOwnsFrontendIP:found secondary service %s of the frontend IP config %s", service.Name, *fip.Name) + logger.V(6).Info("found secondary service of the frontend IP config", "serviceName", service.Name, "fipName", *fip.Name) return true } } @@ -474,6 +477,7 @@ func newAvailabilitySet(az *Cloud) (VMSet, error) { // It must return ("", cloudprovider.InstanceNotFound) if the instance does // not exist or is no longer running. 
func (as *availabilitySet) GetInstanceIDByNodeName(ctx context.Context, name string) (string, error) { + logger := log.Background().WithName("GetInstanceIDByNodeName") var machine *armcompute.VirtualMachine var err error @@ -484,10 +488,10 @@ func (as *availabilitySet) GetInstanceIDByNodeName(ctx context.Context, name str } if err != nil { if as.CloudProviderBackoff { - klog.V(2).Infof("GetInstanceIDByNodeName(%s) backing off", name) + logger.V(2).Info("backing off", "node", name) machine, err = as.GetVirtualMachineWithRetry(ctx, types.NodeName(name), azcache.CacheReadTypeUnsafe) if err != nil { - klog.V(2).Infof("GetInstanceIDByNodeName(%s) abort backoff", name) + logger.V(2).Info("abort backoff", "node", name) return "", err } } else { @@ -506,6 +510,7 @@ func (as *availabilitySet) GetInstanceIDByNodeName(ctx context.Context, name str // GetPowerStatusByNodeName returns the power state of the specified node. func (as *availabilitySet) GetPowerStatusByNodeName(ctx context.Context, name string) (powerState string, err error) { + logger := log.Background().WithName("GetPowerStatusByNodeName") vm, err := as.getVirtualMachine(ctx, types.NodeName(name), azcache.CacheReadTypeDefault) if err != nil { return powerState, err @@ -516,7 +521,7 @@ func (as *availabilitySet) GetPowerStatusByNodeName(ctx context.Context, name st } // vm.Properties.InstanceView or vm.Properties.InstanceView.Statuses are nil when the VM is under deleting. - klog.V(3).Infof("InstanceView for node %q is nil, assuming it's deleting", name) + logger.V(3).Info("InstanceView for node is nil, assuming it's deleting", "node", name) return consts.VMPowerStateUnknown, nil } @@ -739,6 +744,7 @@ func (as *availabilitySet) GetVMSetNames(ctx context.Context, service *v1.Servic } func (as *availabilitySet) GetNodeVMSetName(ctx context.Context, node *v1.Node) (string, error) { + logger := log.Background().WithName("GetNodeVMSetName") var hostName string for _, nodeAddress := range node.Status.Addresses { if strings.EqualFold(string(nodeAddress.Type), string(v1.NodeHostName)) { @@ -765,7 +771,7 @@ func (as *availabilitySet) GetNodeVMSetName(ctx context.Context, node *v1.Node) for _, vm := range vms { if strings.EqualFold(ptr.Deref(vm.Name, ""), hostName) { if vm.Properties.AvailabilitySet != nil && ptr.Deref(vm.Properties.AvailabilitySet.ID, "") != "" { - klog.V(4).Infof("as.GetNodeVMSetName: found vm %s", hostName) + logger.V(4).Info("found vm", "vm", hostName) asName, err = getLastSegment(ptr.Deref(vm.Properties.AvailabilitySet.ID, ""), "/") if err != nil { @@ -778,7 +784,7 @@ func (as *availabilitySet) GetNodeVMSetName(ctx context.Context, node *v1.Node) } } - klog.V(4).Infof("as.GetNodeVMSetName: found availability set name %s from node name %s", asName, node.Name) + logger.V(4).Info("found availability set name from node name", "availabilitySetName", asName, "node", node.Name) return asName, nil } @@ -800,11 +806,12 @@ func extractResourceGroupByNicID(nicID string) (string, error) { // getPrimaryInterfaceWithVMSet gets machine primary network interface by node name and vmSet.
func (as *availabilitySet) getPrimaryInterfaceWithVMSet(ctx context.Context, nodeName, vmSetName string) (*armnetwork.Interface, string, error) { + logger := log.Background().WithName("getPrimaryInterfaceWithVMSet") var machine *armcompute.VirtualMachine machine, err := as.GetVirtualMachineWithRetry(ctx, types.NodeName(nodeName), azcache.CacheReadTypeDefault) if err != nil { - klog.V(2).Infof("GetPrimaryInterface(%s, %s) abort backoff", nodeName, vmSetName) + logger.V(2).Info("abort backoff", "nodeName", nodeName, "vmSetName", vmSetName) return nil, "", err } @@ -834,8 +841,7 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(ctx context.Context, nod if vmSetName != "" && needCheck { expectedAvailabilitySetID := as.getAvailabilitySetID(nodeResourceGroup, vmSetName) if machine.Properties.AvailabilitySet == nil || !strings.EqualFold(*machine.Properties.AvailabilitySet.ID, expectedAvailabilitySetID) { - klog.V(3).Infof( - "GetPrimaryInterface: nic (%s) is not in the availabilitySet(%s)", nicName, vmSetName) + logger.V(3).Info("nic is not in the availabilitySet", "nic", nicName, "availabilitySet", vmSetName) return nil, "", errNotInVMSet } } @@ -862,12 +868,13 @@ func (as *availabilitySet) getPrimaryInterfaceWithVMSet(ctx context.Context, nod // EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is // participating in the specified LoadBalancer Backend Pool. func (as *availabilitySet) EnsureHostInPool(ctx context.Context, service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetName string) (string, string, string, *armcompute.VirtualMachineScaleSetVM, error) { + logger := log.Background().WithName("EnsureHostInPool") vmName := mapNodeNameToVMName(nodeName) serviceName := getServiceName(service) nic, _, err := as.getPrimaryInterfaceWithVMSet(ctx, vmName, vmSetName) if err != nil { if errors.Is(err, errNotInVMSet) { - klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the vmSet %s", nodeName, vmSetName) + logger.V(3).Info("skips node because it is not in the vmSet", "node", nodeName, "vmSet", vmSetName) return "", "", "", nil, nil } @@ -922,7 +929,7 @@ func (as *availabilitySet) EnsureHostInPool(ctx context.Context, service *v1.Ser return "", "", "", nil, err } if !isSameLB { - klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName) + logger.V(4).Info("Node has already been added to LB, omit adding it to a new one", "node", nodeName, "oldLBName", oldLBName) return "", "", "", nil, nil } } @@ -935,7 +942,7 @@ func (as *availabilitySet) EnsureHostInPool(ctx context.Context, service *v1.Ser primaryIPConfig.Properties.LoadBalancerBackendAddressPools = newBackendPools nicName := *nic.Name - klog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName) + logger.V(3).Info("updating nic", "service", serviceName, "nic", nicName) err := as.CreateOrUpdateInterface(ctx, service, nic) if err != nil { return "", "", "", nil, err @@ -947,6 +954,7 @@ func (as *availabilitySet) EnsureHostInPool(ctx context.Context, service *v1.Ser // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool.
func (as *availabilitySet) EnsureHostsInPool(ctx context.Context, service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetName string) error { + logger := log.Background().WithName("EnsureHostsInPool") mc := metrics.NewMetricContext("services", "vmas_ensure_hosts_in_pool", as.ResourceGroup, as.SubscriptionID, getServiceName(service)) isOperationSucceeded := false defer func() { @@ -957,7 +965,7 @@ func (as *availabilitySet) EnsureHostsInPool(ctx context.Context, service *v1.Se for _, node := range nodes { localNodeName := node.Name if as.UseStandardLoadBalancer() && as.ExcludeMasterNodesFromStandardLB() && isControlPlaneNode(node) { - klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) + logger.V(4).Info("Excluding master node from load balancer backendpool", "node", localNodeName, "backendpool", backendPoolID) continue } @@ -967,7 +975,7 @@ func (as *availabilitySet) EnsureHostsInPool(ctx context.Context, service *v1.Se return err } if shouldExcludeLoadBalancer { - klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) + logger.V(4).Info("Excluding unmanaged/external-resource-group node", "node", localNodeName) continue } @@ -993,6 +1001,7 @@ func (as *availabilitySet) EnsureHostsInPool(ctx context.Context, service *v1.Se // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes. // backendPoolIDs are the IDs of the backendpools to be deleted. func (as *availabilitySet) EnsureBackendPoolDeleted(ctx context.Context, service *v1.Service, backendPoolIDs []string, vmSetName string, backendAddressPools []*armnetwork.BackendAddressPool, _ bool) (bool, error) { + logger := log.Background().WithName("EnsureBackendPoolDeleted") // Returns nil if backend address pools already deleted. if backendAddressPools == nil { return false, nil @@ -1045,7 +1054,7 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(ctx context.Context, service nic, vmasID, err := as.getPrimaryInterfaceWithVMSet(ctx, vmName, vmSetName) if err != nil { if errors.Is(err, errNotInVMSet) { - klog.V(3).Infof("EnsureBackendPoolDeleted skips node %s because it is not in the vmSet %s", nodeName, vmSetName) + logger.V(3).Info("skips node because it is not in the vmSet", "node", nodeName, "vmSet", vmSetName) return false, nil } @@ -1059,7 +1068,7 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(ctx context.Context, service // Only remove nodes belonging to specified vmSet to basic LB backends. // If vmasID is empty, then it is standalone VM. 
if vmasID != "" && !strings.EqualFold(vmasName, vmSetName) { - klog.V(2).Infof("EnsureBackendPoolDeleted: skipping the node %s belonging to another vm set %s", nodeName, vmasName) + logger.V(2).Info("skipping the node belonging to another vm set", "node", nodeName, "vmSet", vmasName) continue } @@ -1101,7 +1110,7 @@ func (as *availabilitySet) EnsureBackendPoolDeleted(ctx context.Context, service } nic.Properties.IPConfigurations = newIPConfigs nicUpdaters = append(nicUpdaters, func() error { - klog.V(2).Infof("EnsureBackendPoolDeleted begins to CreateOrUpdate for NIC(%s, %s) with backendPoolIDs %q", as.ResourceGroup, ptr.Deref(nic.Name, ""), backendPoolIDs) + logger.V(2).Info("begins to CreateOrUpdate for NIC with backendPoolIDs", "resourceGroup", as.ResourceGroup, "nicName", ptr.Deref(nic.Name, ""), "backendPoolIDs", backendPoolIDs) _, rerr := as.ComputeClientFactory.GetInterfaceClient().CreateOrUpdate(ctx, as.ResourceGroup, ptr.Deref(nic.Name, ""), *nic) if rerr != nil { klog.Errorf("EnsureBackendPoolDeleted CreateOrUpdate for NIC(%s, %s) failed with error %v", as.ResourceGroup, ptr.Deref(nic.Name, ""), rerr.Error()) @@ -1140,9 +1149,10 @@ func getAvailabilitySetNameByID(asID string) (string, error) { // GetNodeNameByIPConfigurationID gets the node name and the availabilitySet name by IP configuration ID. func (as *availabilitySet) GetNodeNameByIPConfigurationID(ctx context.Context, ipConfigurationID string) (string, string, error) { + logger := log.Background().WithName("GetNodeNameByIPConfigurationID") matches := nicIDRE.FindStringSubmatch(ipConfigurationID) if len(matches) != 3 { - klog.V(4).Infof("Can not extract VM name from ipConfigurationID (%s)", ipConfigurationID) + logger.V(4).Info("Can not extract VM name from ipConfigurationID", "ipConfigurationID", ipConfigurationID) return "", "", fmt.Errorf("invalid ip config ID %s", ipConfigurationID) } @@ -1159,7 +1169,7 @@ func (as *availabilitySet) GetNodeNameByIPConfigurationID(ctx context.Context, i vmID = ptr.Deref(nic.Properties.VirtualMachine.ID, "") } if vmID == "" { - klog.V(2).Infof("GetNodeNameByIPConfigurationID(%s): empty vmID", ipConfigurationID) + logger.V(2).Info("empty vmID", "ipConfigurationID", ipConfigurationID) return "", "", nil } diff --git a/pkg/provider/azure_utils.go b/pkg/provider/azure_utils.go index 92608e2e9b..154ce4c231 100644 --- a/pkg/provider/azure_utils.go +++ b/pkg/provider/azure_utils.go @@ -32,6 +32,7 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) const ( @@ -62,6 +63,7 @@ func getContextWithCancel() (context.Context, context.CancelFunc) { // // XXX: return error instead of logging; decouple tag parsing and tag application func parseTags(tags string, tagsMap map[string]string) map[string]*string { + logger := log.Background().WithName("parseTags") formatted := make(map[string]*string) if tags != "" { @@ -99,7 +101,7 @@ func parseTags(tags string, tagsMap map[string]string) map[string]*string { } if found, k := findKeyInMapCaseInsensitive(formatted, key); found && k != key { - klog.V(4).Infof("parseTags: found identical keys: %s from tags and %s from tagsMap (case-insensitive), %s will replace %s", k, key, key, k) + logger.V(4).Info("found identical keys from tags and tagsMap (case-insensitive), will replace", "identical keys", k, "keyFromTagsMap", key) delete(formatted, k) } formatted[key] = ptr.To(value) @@ -137,6 +139,7 @@ func findKeyInMapWithPrefix(targetMap map[string]*string, key 
string) (bool, str } func (az *Cloud) reconcileTags(currentTagsOnResource, newTags map[string]*string) (reconciledTags map[string]*string, changed bool) { + logger := log.Background().WithName("reconcileTags") var systemTags []string systemTagsMap := make(map[string]*string) @@ -169,7 +172,7 @@ func (az *Cloud) reconcileTags(currentTagsOnResource, newTags map[string]*string for k := range currentTagsOnResource { if _, ok := newTags[k]; !ok { if found, _ := findKeyInMapWithPrefix(systemTagsMap, k); !found { - klog.V(2).Infof("reconcileTags: delete tag %s: %s", k, ptr.Deref(currentTagsOnResource[k], "")) + logger.V(2).Info("delete tag", "key", k, "value", ptr.Deref(currentTagsOnResource[k], "")) delete(currentTagsOnResource, k) changed = true } @@ -189,10 +192,11 @@ func getExtendedLocationTypeFromString(extendedLocationType string) armnetwork.E } func getNodePrivateIPAddress(node *v1.Node, isIPv6 bool) string { + logger := log.Background().WithName("getNodePrivateIPAddress") for _, nodeAddress := range node.Status.Addresses { if strings.EqualFold(string(nodeAddress.Type), string(v1.NodeInternalIP)) && utilnet.IsIPv6String(nodeAddress.Address) == isIPv6 { - klog.V(6).Infof("getNodePrivateIPAddress: node %s, ip %s", node.Name, nodeAddress.Address) + logger.V(6).Info("found node private IP", "node", node.Name, "ip", nodeAddress.Address) return nodeAddress.Address } } @@ -522,9 +526,10 @@ func getServiceIPFamily(service *v1.Service) string { // getResourceGroupAndNameFromNICID parses the ip configuration ID to get the resource group and nic name. func getResourceGroupAndNameFromNICID(ipConfigurationID string) (string, string, error) { + logger := log.Background().WithName("getResourceGroupAndNameFromNICID") matches := nicIDRE.FindStringSubmatch(ipConfigurationID) if len(matches) != 3 { - klog.V(4).Infof("Can not extract nic name from ipConfigurationID (%s)", ipConfigurationID) + logger.V(4).Info("Can not extract nic name from ipConfigurationID", "ipConfigurationID", ipConfigurationID) return "", "", fmt.Errorf("invalid ip config ID %s", ipConfigurationID) } diff --git a/pkg/provider/azure_vmsets_repo.go b/pkg/provider/azure_vmsets_repo.go index 679a7e3247..49f0edbd43 100644 --- a/pkg/provider/azure_vmsets_repo.go +++ b/pkg/provider/azure_vmsets_repo.go @@ -31,10 +31,12 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) // GetVirtualMachineWithRetry invokes az.getVirtualMachine with exponential backoff retry func (az *Cloud) GetVirtualMachineWithRetry(ctx context.Context, name types.NodeName, crt azcache.AzureCacheReadType) (*armcompute.VirtualMachine, error) { + logger := log.Background().WithName("GetVirtualMachineWithRetry") var machine *armcompute.VirtualMachine var retryErr error err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { @@ -46,7 +48,7 @@ func (az *Cloud) GetVirtualMachineWithRetry(ctx context.Context, name types.Node klog.Errorf("GetVirtualMachineWithRetry(%s): backoff failure, will retry, err=%v", name, retryErr) return false, nil } - klog.V(2).Infof("GetVirtualMachineWithRetry(%s): backoff success", name) + logger.V(2).Info("backoff success", "node", name) return true, nil }) if errors.Is(err, wait.ErrWaitTimeout) { @@ -57,12 +59,13 @@ func (az *Cloud) GetVirtualMachineWithRetry(ctx context.Context, name types.Node // ListVirtualMachines invokes az.ComputeClientFactory.GetVirtualMachineClient().List with exponential backoff retry func (az *Cloud) ListVirtualMachines(ctx
context.Context, resourceGroup string) ([]*armcompute.VirtualMachine, error) { + logger := log.Background().WithName("ListVirtualMachines") allNodes, err := az.ComputeClientFactory.GetVirtualMachineClient().List(ctx, resourceGroup) if err != nil { klog.Errorf("ComputeClientFactory.GetVirtualMachineClient().List(%v) failure with err=%v", resourceGroup, err) return nil, err } - klog.V(6).Infof("ComputeClientFactory.GetVirtualMachineClient().List(%v) success", resourceGroup) + logger.V(6).Info("ComputeClientFactory.GetVirtualMachineClient().List success", "resourceGroup", resourceGroup) return allNodes, nil } @@ -73,6 +76,7 @@ func (az *Cloud) getPrivateIPsForMachine(ctx context.Context, nodeName types.Nod } func (az *Cloud) getPrivateIPsForMachineWithRetry(ctx context.Context, nodeName types.NodeName) ([]string, error) { + logger := log.Background().WithName("getPrivateIPsForMachineWithRetry") var privateIPs []string err := wait.ExponentialBackoff(az.RequestBackoff(), func() (bool, error) { var retryErr error @@ -85,7 +89,7 @@ func (az *Cloud) getPrivateIPsForMachineWithRetry(ctx context.Context, nodeName klog.Errorf("GetPrivateIPsByNodeName(%s): backoff failure, will retry,err=%v", nodeName, retryErr) return false, nil } - klog.V(3).Infof("GetPrivateIPsByNodeName(%s): backoff success", nodeName) + logger.V(3).Info("backoff success", "node", nodeName) return true, nil }) return privateIPs, err @@ -97,6 +101,7 @@ func (az *Cloud) getIPForMachine(ctx context.Context, nodeName types.NodeName) ( // GetIPForMachineWithRetry invokes az.getIPForMachine with exponential backoff retry func (az *Cloud) GetIPForMachineWithRetry(ctx context.Context, name types.NodeName) (string, string, error) { + logger := log.Background().WithName("GetIPForMachineWithRetry") var ip, publicIP string err := wait.ExponentialBackoffWithContext(ctx, az.RequestBackoff(), func(ctx context.Context) (bool, error) { var retryErr error @@ -105,13 +110,14 @@ func (az *Cloud) GetIPForMachineWithRetry(ctx context.Context, name types.NodeNa klog.Errorf("GetIPForMachineWithRetry(%s): backoff failure, will retry,err=%v", name, retryErr) return false, nil } - klog.V(3).Infof("GetIPForMachineWithRetry(%s): backoff success", name) + logger.V(3).Info("backoff success", "node", name) return true, nil }) return ip, publicIP, err } func (az *Cloud) newVMCache() (azcache.Resource, error) { + logger := log.Background().WithName("newVMCache") getter := func(ctx context.Context, key string) (interface{}, error) { // Currently InstanceView request are used by azure_zones, while the calls come after non-InstanceView // request. 
If we first send an InstanceView request and then a non InstanceView request, the second @@ -132,13 +138,13 @@ func (az *Cloud) newVMCache() (azcache.Resource, error) { } if !exists { - klog.V(2).Infof("Virtual machine %q not found", key) + logger.V(2).Info("Virtual machine not found", "vmName", key) return nil, nil } if vm != nil && vm.Properties != nil && strings.EqualFold(ptr.Deref(vm.Properties.ProvisioningState, ""), string(consts.ProvisioningStateDeleting)) { - klog.V(2).Infof("Virtual machine %q is under deleting", key) + logger.V(2).Info("Virtual machine is under deleting", "vmName", key) return nil, nil } diff --git a/pkg/provider/azure_vmss.go b/pkg/provider/azure_vmss.go index 9198c452b7..5c41cc4e7f 100644 --- a/pkg/provider/azure_vmss.go +++ b/pkg/provider/azure_vmss.go @@ -171,6 +171,7 @@ func newScaleSet(az *Cloud) (VMSet, error) { } func (ss *ScaleSet) getVMSS(ctx context.Context, vmssName string, crt azcache.AzureCacheReadType) (*armcompute.VirtualMachineScaleSet, error) { + logger := log.Background().WithName("getVMSS") getter := func(vmssName string) (*armcompute.VirtualMachineScaleSet, error) { cached, err := ss.vmssCache.Get(ctx, consts.VMSSKey, crt) if err != nil { @@ -193,7 +194,7 @@ func (ss *ScaleSet) getVMSS(ctx context.Context, vmssName string, crt azcache.Az return vmss, nil } - klog.V(2).Infof("Couldn't find VMSS with name %s, refreshing the cache", vmssName) + logger.V(2).Info("Couldn't find VMSS, refreshing the cache", "vmssName", vmssName) _ = ss.vmssCache.Delete(consts.VMSSKey) vmss, err = getter(vmssName) if err != nil { @@ -209,6 +210,7 @@ func (ss *ScaleSet) getVMSS(ctx context.Context, vmssName string, crt azcache.Az // getVmssVMByNodeIdentity find virtualMachineScaleSetVM by nodeIdentity, using node's parent VMSS cache. // Returns cloudprovider.InstanceNotFound if the node does not belong to the scale set named in nodeIdentity. func (ss *ScaleSet) getVmssVMByNodeIdentity(ctx context.Context, node *nodeIdentity, crt azcache.AzureCacheReadType) (*virtualmachine.VirtualMachine, error) { + logger := log.Background().WithName("getVmssVMByNodeIdentity") // FIXME(ccc): check only if vmss is uniform. _, err := getScaleSetVMInstanceID(node.nodeName) if err != nil { @@ -247,11 +249,11 @@ func (ss *ScaleSet) getVmssVMByNodeIdentity(ctx context.Context, node *nodeIdent defer ss.lockMap.UnlockEntry(cacheKey) vm, found, err = getter(ctx, crt) if err == nil && found && vm != nil { - klog.V(2).Infof("found VMSS VM with nodeName %s after retry", node.nodeName) + logger.V(2).Info("found VMSS VM with nodeName after retry", "nodeName", node.nodeName) return vm, nil } - klog.V(2).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache(vmss: %s, rg: %s)", node.nodeName, node.vmssName, node.resourceGroup) + logger.V(2).Info("Couldn't find VMSS VM with nodeName, refreshing the cache", "nodeName", node.nodeName, "vmss", node.vmssName, "resourceGroup", node.resourceGroup) vm, found, err = getter(ctx, azcache.CacheReadTypeForceRefresh) if err != nil { return nil, err @@ -282,6 +284,7 @@ func (ss *ScaleSet) getVmssVM(ctx context.Context, nodeName string, crt azcache. // GetPowerStatusByNodeName returns the power state of the specified node. 
func (ss *ScaleSet) GetPowerStatusByNodeName(ctx context.Context, name string) (powerState string, err error) { + logger := log.Background().WithName("GetPowerStatusByNodeName") vmManagementType, err := ss.getVMManagementTypeByNodeName(ctx, name, azcache.CacheReadTypeUnsafe) if err != nil { klog.Errorf("Failed to check VM management type: %v", err) @@ -310,7 +313,7 @@ func (ss *ScaleSet) GetPowerStatusByNodeName(ctx context.Context, name string) ( } // vm.Properties.InstanceView or vm.Properties.InstanceView.Statuses are nil when the VM is under deleting. - klog.V(3).Infof("InstanceView for node %q is nil, assuming it's deleting", name) + logger.V(3).Info("InstanceView for node is nil, assuming it's deleting", "node", name) return consts.VMPowerStateUnknown, nil } @@ -346,6 +349,7 @@ func (ss *ScaleSet) GetProvisioningStateByNodeName(ctx context.Context, name str // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. // The node must belong to one of scale sets. func (ss *ScaleSet) getVmssVMByInstanceID(ctx context.Context, resourceGroup, scaleSetName, instanceID string, crt azcache.AzureCacheReadType) (*armcompute.VirtualMachineScaleSetVM, error) { + logger := log.Background().WithName("getVmssVMByInstanceID") getter := func(ctx context.Context, crt azcache.AzureCacheReadType) (vm *armcompute.VirtualMachineScaleSetVM, found bool, err error) { virtualMachines, err := ss.getVMSSVMsFromCache(ctx, resourceGroup, scaleSetName, crt) if err != nil { @@ -373,7 +377,7 @@ func (ss *ScaleSet) getVmssVMByInstanceID(ctx context.Context, resourceGroup, sc return nil, err } if !found { - klog.V(2).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache", scaleSetName, instanceID) + logger.V(2).Info("Couldn't find VMSS VM with scaleSetName and instanceID, refreshing the cache", "scaleSetName", scaleSetName, "instanceID", instanceID) vm, found, err = getter(ctx, azcache.CacheReadTypeForceRefresh) if err != nil { return nil, err @@ -383,7 +387,7 @@ func (ss *ScaleSet) getVmssVMByInstanceID(ctx context.Context, resourceGroup, sc return vm, nil } if found && vm == nil { - klog.V(2).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache if it is expired", scaleSetName, instanceID) + logger.V(2).Info("Couldn't find VMSS VM with scaleSetName and instanceID, refreshing the cache if it is expired", "scaleSetName", scaleSetName, "instanceID", instanceID) vm, found, err = getter(ctx, azcache.CacheReadTypeDefault) if err != nil { return nil, err @@ -446,6 +450,7 @@ func (ss *ScaleSet) GetInstanceIDByNodeName(ctx context.Context, name string) (s // azure:///subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/1 // /subscriptions/subsid/resourceGroups/rg/providers/Microsoft.Compute/virtualMachineScaleSets/aks-agentpool-22126781-vmss/virtualMachines/k8s-agentpool-36841236-vmss_1 func (ss *ScaleSet) GetNodeNameByProviderID(ctx context.Context, providerID string) (types.NodeName, error) { + logger := log.Background().WithName("GetNodeNameByProviderID") vmManagementType, err := ss.getVMManagementTypeByProviderID(ctx, providerID, azcache.CacheReadTypeUnsafe) if err != nil { klog.Errorf("Failed to check VM management type: %v", err) @@ -473,7 +478,7 @@ func (ss *ScaleSet) GetNodeNameByProviderID(ctx context.Context, providerID stri instanceID, err := getLastSegment(providerID, "/") if err != nil { - klog.V(4).Infof("Can not extract instanceID from providerID 
(%s), assuming it is managed by availability set: %v", providerID, err) + logger.V(4).Error(err, "Can not extract instanceID from providerID, assuming it is managed by availability set", "providerID", providerID) return ss.availabilitySet.GetNodeNameByProviderID(ctx, providerID) } @@ -646,6 +651,7 @@ func (ss *ScaleSet) GetIPByNodeName(ctx context.Context, nodeName string) (strin } func (ss *ScaleSet) getVMSSPublicIPAddress(resourceGroupName string, virtualMachineScaleSetName string, virtualMachineIndex string, networkInterfaceName string, IPConfigurationName string, publicIPAddressName string) (*armnetwork.PublicIPAddress, bool, error) { + logger := log.Background().WithName("getVMSSPublicIPAddress") ctx, cancel := getContextWithCancel() defer cancel() @@ -656,7 +662,7 @@ func (ss *ScaleSet) getVMSSPublicIPAddress(resourceGroupName string, virtualMach } if !exists { - klog.V(2).Infof("Public IP %q not found", publicIPAddressName) + logger.V(2).Info("Public IP not found", "publicIPAddressName", publicIPAddressName) return nil, false, nil } @@ -761,6 +767,7 @@ func extractResourceGroupByProviderID(providerID string) (string, error) { // getNodeIdentityByNodeName use the VMSS cache to find a node's resourcegroup and vmss, returned in a nodeIdentity. func (ss *ScaleSet) getNodeIdentityByNodeName(ctx context.Context, nodeName string, crt azcache.AzureCacheReadType) (*nodeIdentity, error) { + logger := log.Background().WithName("getNodeIdentityByNodeName") getter := func(nodeName string, crt azcache.AzureCacheReadType) (*nodeIdentity, error) { node := &nodeIdentity{ nodeName: nodeName, @@ -809,7 +816,7 @@ func (ss *ScaleSet) getNodeIdentityByNodeName(ctx context.Context, nodeName stri return node, nil } - klog.V(2).Infof("Couldn't find VMSS for node %s, refreshing the cache", nodeName) + logger.V(2).Info("Couldn't find VMSS for node, refreshing the cache", "node", nodeName) node, err = getter(nodeName, azcache.CacheReadTypeForceRefresh) if err != nil { return nil, err @@ -823,16 +830,17 @@ func (ss *ScaleSet) getNodeIdentityByNodeName(ctx context.Context, nodeName stri // listScaleSetVMs lists VMs belonging to the specified scale set. func (ss *ScaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]*armcompute.VirtualMachineScaleSetVM, error) { + logger := log.Background().WithName("listScaleSetVMs") ctx, cancel := getContextWithCancel() defer cancel() var allVMs []*armcompute.VirtualMachineScaleSetVM var rerr error if ss.ListVmssVirtualMachinesWithoutInstanceView { - klog.V(6).Info("listScaleSetVMs called for scaleSetName: ", scaleSetName, " resourceGroup: ", resourceGroup) + logger.V(6).Info("listScaleSetVMs called", "scaleSetName", scaleSetName, "resourceGroup", resourceGroup) allVMs, rerr = ss.ComputeClientFactory.GetVirtualMachineScaleSetVMClient().List(ctx, resourceGroup, scaleSetName) } else { - klog.V(6).Info("listScaleSetVMs called for scaleSetName with instanceView: ", scaleSetName, " resourceGroup: ", resourceGroup) + logger.V(6).Info("listScaleSetVMs called with instanceView", "scaleSetName", scaleSetName, "resourceGroup", resourceGroup) allVMs, rerr = ss.ComputeClientFactory.GetVirtualMachineScaleSetVMClient().ListVMInstanceView(ctx, resourceGroup, scaleSetName) } if rerr != nil { @@ -849,6 +857,7 @@ func (ss *ScaleSet) listScaleSetVMs(scaleSetName, resourceGroup string) ([]*armc // getAgentPoolScaleSets lists the virtual machines for the resource group and then builds // a list of scale sets that match the nodes available to k8s.
func (ss *ScaleSet) getAgentPoolScaleSets(ctx context.Context, nodes []*v1.Node) ([]string, error) { + logger := log.Background().WithName("getAgentPoolScaleSets") agentPoolScaleSets := []string{} for nx := range nodes { if isControlPlaneNode(nodes[nx]) { @@ -871,7 +880,7 @@ func (ss *ScaleSet) getAgentPoolScaleSets(ctx context.Context, nodes []*v1.Node) } if vm.VMSSName == "" { - klog.V(3).Infof("Node %q is not belonging to any known scale sets", nodeName) + logger.V(3).Info("Node does not belong to any known scale sets", "node", nodeName) continue } @@ -1194,7 +1203,8 @@ func getVmssAndResourceGroupNameByVMID(id string) (string, string, error) { } func (ss *ScaleSet) ensureVMSSInPool(ctx context.Context, _ *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error { - klog.V(2).Infof("ensureVMSSInPool: ensuring VMSS with backendPoolID %s", backendPoolID) + logger := log.Background().WithName("ensureVMSSInPool") + logger.V(2).Info("ensuring VMSS with backendPoolID", "backendPoolID", backendPoolID) vmssNamesMap := make(map[string]bool) // the single standard load balancer supports multiple vmss in its backend while @@ -1211,7 +1221,7 @@ func (ss *ScaleSet) ensureVMSSInPool(ctx context.Context, _ *v1.Service, nodes [ return err } if shouldExcludeLoadBalancer { - klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name) + logger.V(4).Info("Excluding unmanaged/external-resource-group node", "node", node.Name) continue } @@ -1220,11 +1230,11 @@ func (ss *ScaleSet) ensureVMSSInPool(ctx context.Context, _ *v1.Service, nodes [ if node.Spec.ProviderID != "" { resourceGroupName, vmssName, err = getVmssAndResourceGroupNameByVMProviderID(node.Spec.ProviderID) if err != nil { - klog.V(4).Infof("ensureVMSSInPool: the provider ID %s of node %s is not the format of VMSS VM, will skip checking and continue", node.Spec.ProviderID, node.Name) + logger.V(4).Info("the provider ID of node is not the format of VMSS VM, will skip checking and continue", "providerID", node.Spec.ProviderID, "node", node.Name) continue } } else { - klog.V(4).Infof("ensureVMSSInPool: the provider ID of node %s is empty, will check the VM ID", node.Name) + logger.V(4).Info("the provider ID of node is empty, will check the VM ID", "node", node.Name) instanceID, err := ss.InstanceID(ctx, types.NodeName(node.Name)) if err != nil { klog.Errorf("ensureVMSSInPool: Failed to get instance ID for node %q: %v", node.Name, err) @@ -1232,7 +1242,7 @@ func (ss *ScaleSet) ensureVMSSInPool(ctx context.Context, _ *v1.Service, nodes [ } resourceGroupName, vmssName, err = getVmssAndResourceGroupNameByVMID(instanceID) if err != nil { - klog.V(4).Infof("ensureVMSSInPool: the instance ID %s of node %s is not the format of VMSS VM, will skip checking and continue", node.Spec.ProviderID, node.Name) + logger.V(4).Info("the instance ID of node is not the format of VMSS VM, will skip checking and continue", "instanceID", instanceID, "node", node.Name) continue } } @@ -1245,7 +1255,7 @@ func (ss *ScaleSet) ensureVMSSInPool(ctx context.Context, _ *v1.Service, nodes [ vmssNamesMap[vmSetNameOfLB] = true } - klog.V(2).Infof("ensureVMSSInPool begins to update VMSS %v with backendPoolID %s", vmssNamesMap, backendPoolID) + logger.V(2).Info("begins to update VMSS with backendPoolID", "VMSS", vmssNamesMap, "backendPoolID", backendPoolID) for vmssName := range vmssNamesMap { vmss, err := ss.getVMSS(ctx, vmssName, azcache.CacheReadTypeDefault) if err != nil { @@ -1255,19 +1265,19 @@ func (ss *ScaleSet) ensureVMSSInPool(ctx
context.Context, _ *v1.Service, nodes [ // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. if vmss.Properties.ProvisioningState != nil && strings.EqualFold(*vmss.Properties.ProvisioningState, consts.ProvisionStateDeleting) { - klog.V(3).Infof("ensureVMSSInPool: found vmss %s being deleted, skipping", vmssName) + logger.V(3).Info("found vmss being deleted, skipping", "vmss", vmssName) continue } if vmss.Properties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil { - klog.V(4).Infof("EnsureHostInPool: cannot obtain the primary network interface configuration of vmss %s", vmssName) + logger.V(4).Info("cannot obtain the primary network interface configuration of vmss", "vmss", vmssName) continue } // It is possible to run Windows 2019 nodes in IPv4-only mode in a dual-stack cluster. IPv6 is not supported on // Windows 2019 nodes and therefore does not need to be added to the IPv6 backend pool. if isWindows2019(vmss) && isBackendPoolIPv6(backendPoolID) { - klog.V(3).Infof("ensureVMSSInPool: vmss %s is Windows 2019, skipping adding to IPv6 backend pool", vmssName) + logger.V(3).Info("vmss is Windows 2019, skipping adding to IPv6 backend pool", "vmss", vmssName) continue } @@ -1314,7 +1324,7 @@ func (ss *ScaleSet) ensureVMSSInPool(ctx context.Context, _ *v1.Service, nodes [ return err } if !isSameLB { - klog.V(4).Infof("VMSS %q has already been added to LB %q, omit adding it to a new one", vmssName, oldLBName) + logger.V(4).Info("VMSS has already been added to LB, omit adding it to a new one", "vmss", vmssName, "LB", oldLBName) return nil } } @@ -1345,7 +1355,7 @@ func (ss *ScaleSet) ensureVMSSInPool(ctx context.Context, _ *v1.Service, nodes [ _ = ss.vmssCache.Delete(consts.VMSSKey) }() - klog.V(2).Infof("ensureVMSSInPool begins to update vmss(%s) with new backendPoolID %s", vmssName, backendPoolID) + logger.V(2).Info("begins to update vmss with new backendPoolID", "vmss", vmssName, "backendPoolID", backendPoolID) rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS) if rerr != nil { klog.Errorf("ensureVMSSInPool CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssName, backendPoolID, rerr) @@ -1396,6 +1406,7 @@ func isWindows2019(vmss *armcompute.VirtualMachineScaleSet) bool { } func (ss *ScaleSet) ensureHostsInPool(ctx context.Context, service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error { + logger := log.Background().WithName("ensureHostsInPool") mc := metrics.NewMetricContext("services", "vmss_ensure_hosts_in_pool", ss.ResourceGroup, ss.SubscriptionID, getServiceName(service)) isOperationSucceeded := false defer func() { @@ -1416,7 +1427,7 @@ func (ss *ScaleSet) ensureHostsInPool(ctx context.Context, service *v1.Service, localNodeName := node.Name if ss.UseStandardLoadBalancer() && ss.ExcludeMasterNodesFromStandardLB() && isControlPlaneNode(node) { - klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) + logger.V(4).Info("Excluding master node from load balancer backendpool", "node", localNodeName, "backendPoolID", backendPoolID) continue } @@ -1426,7 +1437,7 @@ func (ss *ScaleSet) ensureHostsInPool(ctx context.Context, service *v1.Service, return err } if shouldExcludeLoadBalancer { - klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) + logger.V(4).Info("Excluding 
unmanaged/external-resource-group node", "node", localNodeName) continue } @@ -1505,6 +1516,7 @@ func (ss *ScaleSet) ensureHostsInPool(ctx context.Context, service *v1.Service, // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool. func (ss *ScaleSet) EnsureHostsInPool(ctx context.Context, service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error { + logger := log.Background().WithName("EnsureHostsInPool") if ss.DisableAvailabilitySetNodes && !ss.EnableVmssFlexNodes { return ss.ensureHostsInPool(ctx, service, nodes, backendPoolID, vmSetNameOfLB) } @@ -1516,7 +1528,7 @@ func (ss *ScaleSet) EnsureHostsInPool(ctx context.Context, service *v1.Service, localNodeName := node.Name if ss.UseStandardLoadBalancer() && ss.ExcludeMasterNodesFromStandardLB() && isControlPlaneNode(node) { - klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) + logger.V(4).Info("Excluding master node from load balancer backendpool", "node", localNodeName, "backendPoolID", backendPoolID) continue } @@ -1526,7 +1538,7 @@ func (ss *ScaleSet) EnsureHostsInPool(ctx context.Context, service *v1.Service, return err } if shouldExcludeLoadBalancer { - klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) + logger.V(4).Info("Excluding unmanaged/external-resource-group node", "node", localNodeName) continue } @@ -1544,7 +1556,7 @@ func (ss *ScaleSet) EnsureHostsInPool(ctx context.Context, service *v1.Service, vmasNodes = append(vmasNodes, node) continue } - klog.V(3).Infof("EnsureHostsInPool skips node %s because VMAS nodes couldn't be added to basic LB with VMSS backends", localNodeName) + logger.V(3).Info("EnsureHostsInPool skips node because VMAS nodes couldn't be added to basic LB with VMSS backends", "node", localNodeName) continue } if vmManagementType == ManagedByVmssFlex { @@ -1553,7 +1565,7 @@ func (ss *ScaleSet) EnsureHostsInPool(ctx context.Context, service *v1.Service, vmssFlexNodes = append(vmssFlexNodes, node) continue } - klog.V(3).Infof("EnsureHostsInPool skips node %s because VMSS Flex nodes deos not support Basic Load Balancer", localNodeName) + logger.V(3).Info("EnsureHostsInPool skips node because VMSS Flex nodes deos not support Basic Load Balancer", "node", localNodeName) continue } vmssUniformNodes = append(vmssUniformNodes, node) @@ -1603,8 +1615,8 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromNode(ctx context.Context, nodeNa // Find primary network interface configuration. 
if vm.VirtualMachineScaleSetVMProperties.NetworkProfileConfiguration.NetworkInterfaceConfigurations == nil { - klog.V(4).Infof("ensureBackendPoolDeletedFromNode: cannot obtain the primary network interface configuration, of vm %s, "+ - "probably because the vm's being deleted", nodeName) + log.Background().WithName("ensureBackendPoolDeletedFromNode").V(4).Info("cannot obtain the primary network interface configuration of vm, "+ + "probably because the vm is being deleted", "vm", nodeName) return "", "", "", nil, nil } networkInterfaceConfigurations := vm.VirtualMachineScaleSetVMProperties.NetworkProfileConfiguration.NetworkInterfaceConfigurations @@ -1687,9 +1699,10 @@ func (ss *ScaleSet) GetNodeNameByIPConfigurationID(ctx context.Context, ipConfig } func getScaleSetAndResourceGroupNameByIPConfigurationID(ipConfigurationID string) (string, string, error) { + logger := log.Background().WithName("getScaleSetAndResourceGroupNameByIPConfigurationID") matches := vmssIPConfigurationRE.FindStringSubmatch(ipConfigurationID) if len(matches) != 4 { - klog.V(4).Infof("Can not extract scale set name from ipConfigurationID (%s), assuming it is managed by availability set or vmss flex", ipConfigurationID) + logger.V(4).Info("Can not extract scale set name from ipConfigurationID, assuming it is managed by availability set or vmss flex", "ipConfigurationID", ipConfigurationID) return "", "", ErrorNotVmssInstance } @@ -1756,6 +1769,7 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromVMSS(ctx context.Context, backen } func (ss *ScaleSet) ensureBackendPoolDeletedFromVmssUniform(ctx context.Context, backendPoolIDs []string, vmSetName string) error { + logger := log.Background().WithName("ensureBackendPoolDeletedFromVmssUniform") vmssNamesMap := make(map[string]bool) // the standard load balancer supports multiple vmss in its backend while the basic SKU doesn't if ss.UseStandardLoadBalancer() { @@ -1774,20 +1788,20 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromVmssUniform(ctx context.Context, } else if v, ok := value.(*armcompute.VirtualMachineScaleSet); ok { vmss = v } - klog.V(2).Infof("ensureBackendPoolDeletedFromVmssUniform: vmss %q, backendPoolIDs %q", ptr.Deref(vmss.Name, ""), backendPoolIDs) + logger.V(2).Info("ensuring backend pool deleted from vmss", "vmssName", ptr.Deref(vmss.Name, ""), "backendPoolIDs", backendPoolIDs) // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it.
if vmss.Properties.ProvisioningState != nil && strings.EqualFold(*vmss.Properties.ProvisioningState, consts.ProvisionStateDeleting) { - klog.V(3).Infof("ensureBackendPoolDeletedFromVMSS: found vmss %s being deleted, skipping", ptr.Deref(vmss.Name, "")) + logger.V(3).Info("found vmss being deleted, skipping", "vmss", ptr.Deref(vmss.Name, "")) return true } if vmss.Properties.VirtualMachineProfile == nil { - klog.V(4).Infof("ensureBackendPoolDeletedFromVMSS: vmss %s has no VirtualMachineProfile, skipping", ptr.Deref(vmss.Name, "")) + logger.V(4).Info("vmss has no VirtualMachineProfile, skipping", "vmss", ptr.Deref(vmss.Name, "")) return true } if vmss.Properties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil { - klog.V(4).Infof("ensureBackendPoolDeletedFromVMSS: cannot obtain the primary network interface configuration, of vmss %s", ptr.Deref(vmss.Name, "")) + logger.V(4).Info("cannot obtain the primary network interface configuration, of vmss", "vmss", ptr.Deref(vmss.Name, "")) return true } vmssNIC := vmss.Properties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations @@ -1810,9 +1824,9 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromVmssUniform(ctx context.Context, loadBalancerBackendAddressPools = primaryIPConfig.Properties.LoadBalancerBackendAddressPools } for _, loadBalancerBackendAddressPool := range loadBalancerBackendAddressPools { - klog.V(4).Infof("ensureBackendPoolDeletedFromVMSS: loadBalancerBackendAddressPool (%s) on vmss (%s)", ptr.Deref(loadBalancerBackendAddressPool.ID, ""), ptr.Deref(vmss.Name, "")) + logger.V(4).Info("loadBalancerBackendAddressPool on vmss", "backendAddressPool", ptr.Deref(loadBalancerBackendAddressPool.ID, ""), "vmss", ptr.Deref(vmss.Name, "")) if strings.EqualFold(ptr.Deref(loadBalancerBackendAddressPool.ID, ""), backendPoolID) { - klog.V(4).Infof("ensureBackendPoolDeletedFromVMSS: found vmss %s with backend pool %s, removing it", ptr.Deref(vmss.Name, ""), backendPoolID) + logger.V(4).Info("found vmss with backend pool, removing it", "vmss", ptr.Deref(vmss.Name, ""), "backendPool", backendPoolID) vmssNamesMap[ptr.Deref(vmss.Name, "")] = true } } @@ -1833,7 +1847,7 @@ func (ss *ScaleSet) ensureBackendPoolDeletedFromVmssUniform(ctx context.Context, return utilerrors.Flatten(utilerrors.NewAggregate(errorList)) } } else { - klog.V(2).Infof("ensureBackendPoolDeletedFromVmssUniform: vmss %q, backendPoolIDs %q", vmSetName, backendPoolIDs) + logger.V(2).Info("", "vmss", vmSetName, "backendPoolIDs", backendPoolIDs) vmssNamesMap[vmSetName] = true } @@ -2146,6 +2160,7 @@ func (ss *ScaleSet) GetNodeCIDRMasksByProviderID(ctx context.Context, providerID // deleteBackendPoolFromIPConfig deletes the backend pool from the IP config. 
func deleteBackendPoolFromIPConfig(msg, backendPoolID, resource string, primaryNIC *armcompute.VirtualMachineScaleSetNetworkConfiguration) (bool, error) { + logger := log.Background().WithName("deleteBackendPoolFromIPConfig") primaryIPConfig, err := getPrimaryIPConfigFromVMSSNetworkConfig(primaryNIC, backendPoolID, resource) if err != nil { klog.Errorf("%s: failed to get the primary IP config from the VMSS %q's network config: %v", msg, resource, err) @@ -2161,7 +2176,7 @@ func deleteBackendPoolFromIPConfig(msg, backendPoolID, resource string, primaryN for i := len(loadBalancerBackendAddressPools) - 1; i >= 0; i-- { curPool := loadBalancerBackendAddressPools[i] if strings.EqualFold(backendPoolID, *curPool.ID) { - klog.V(10).Infof("%s gets unwanted backend pool %q for VMSS OR VMSS VM %q", msg, backendPoolID, resource) + logger.V(10).Info("gets unwanted backend pool for VMSS OR VMSS VM", "msg", msg, "backendPoolID", backendPoolID, "resource", resource) found = true newBackendPools = append(loadBalancerBackendAddressPools[:i], loadBalancerBackendAddressPools[i+1:]...) } @@ -2175,6 +2190,7 @@ func deleteBackendPoolFromIPConfig(msg, backendPoolID, resource string, primaryN // EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMSS func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(ctx context.Context, vmssNamesMap map[string]bool, backendPoolIDs []string) error { + logger := log.Background().WithName("EnsureBackendPoolDeletedFromVMSets") vmssUpdaters := make([]func() error, 0, len(vmssNamesMap)) errors := make([]error, 0, len(vmssNamesMap)) for vmssName := range vmssNamesMap { @@ -2189,11 +2205,11 @@ func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(ctx context.Context, vmss // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. 
if vmss.Properties.ProvisioningState != nil && strings.EqualFold(*vmss.Properties.ProvisioningState, consts.ProvisionStateDeleting) { - klog.V(3).Infof("EnsureBackendPoolDeletedFromVMSets: found vmss %s being deleted, skipping", vmssName) + logger.V(3).Info("found vmss being deleted, skipping", "vmss", vmssName) continue } if vmss.Properties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil { - klog.V(4).Infof("EnsureBackendPoolDeletedFromVMSets: cannot obtain the primary network interface configuration, of vmss %s", vmssName) + logger.V(4).Info("cannot obtain the primary network interface configuration, of vmss", "vmss", vmssName) continue } vmssNIC := vmss.Properties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations @@ -2236,7 +2252,7 @@ func (ss *ScaleSet) EnsureBackendPoolDeletedFromVMSets(ctx context.Context, vmss _ = ss.vmssCache.Delete(consts.VMSSKey) }() - klog.V(2).Infof("EnsureBackendPoolDeletedFromVMSets begins to update vmss(%s) with backendPoolIDs %q", vmssName, backendPoolIDs) + logger.V(2).Info("begins to update vmss with backendPoolIDs", "vmss", vmssName, "backendPoolIDs", backendPoolIDs) rerr := ss.CreateOrUpdateVMSS(ss.ResourceGroup, vmssName, newVMSS) if rerr != nil { klog.Errorf("EnsureBackendPoolDeletedFromVMSets CreateOrUpdateVMSS(%s) with new backendPoolIDs %q, err: %v", vmssName, backendPoolIDs, rerr) @@ -2314,6 +2330,7 @@ func (ss *ScaleSet) GetAgentPoolVMSetNames(ctx context.Context, nodes []*v1.Node } func (ss *ScaleSet) GetNodeVMSetName(ctx context.Context, node *v1.Node) (string, error) { + logger := log.Background().WithName("GetNodeVMSetName") vmManagementType, err := ss.getVMManagementTypeByNodeName(ctx, node.Name, azcache.CacheReadTypeUnsafe) if err != nil { klog.Errorf("Failed to check VM management type: %v", err) @@ -2336,12 +2353,13 @@ func (ss *ScaleSet) GetNodeVMSetName(ctx context.Context, node *v1.Node) (string return "", err } - klog.V(4).Infof("ss.GetNodeVMSetName: found vmss name %s from node name %s", vmssName, node.Name) + logger.V(4).Info("found vmss name from node name", "vmssName", vmssName, "nodeName", node.Name) return vmssName, nil } // VMSSBatchSize returns the batch size for VMSS operations. 
func (ss *ScaleSet) VMSSBatchSize(ctx context.Context, vmssName string) (int, error) { + logger := log.Background().WithName("VMSSBatchSize") batchSize := 1 vmss, err := ss.getVMSS(ctx, vmssName, azcache.CacheReadTypeDefault) if err != nil { @@ -2353,7 +2371,7 @@ func (ss *ScaleSet) VMSSBatchSize(ctx context.Context, vmssName string) (int, er if batchSize < 1 { batchSize = 1 } - klog.V(2).InfoS("Fetch VMSS batch size", "vmss", vmssName, "size", batchSize) + logger.V(2).Info("Fetch VMSS batch size", "vmss", vmssName, "size", batchSize) return batchSize, nil } diff --git a/pkg/provider/azure_vmss_cache.go b/pkg/provider/azure_vmss_cache.go index 4eeae499b9..38d6c5915e 100644 --- a/pkg/provider/azure_vmss_cache.go +++ b/pkg/provider/azure_vmss_cache.go @@ -29,6 +29,7 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/util/errutils" utilsets "sigs.k8s.io/cloud-provider-azure/pkg/util/sets" ) @@ -65,6 +66,7 @@ const ( ) func (ss *ScaleSet) newVMSSCache() (azcache.Resource, error) { + logger := log.Background().WithName("newVMSSCache") getter := func(ctx context.Context, _ string) (interface{}, error) { localCache := &sync.Map{} // [vmssName]*vmssEntry @@ -109,7 +111,7 @@ func (ss *ScaleSet) newVMSSCache() (azcache.Resource, error) { for _, cacheKey := range vmssVMKeys { vmssName := cacheKey[strings.LastIndex(cacheKey, "/")+1:] if _, ok := localCache.Load(vmssName); !ok { - klog.V(2).Infof("remove vmss %s from vmssVMCache due to rg not found", cacheKey) + logger.V(2).Info("remove vmss from vmssVMCache due to rg not found", "vmss", cacheKey) _ = ss.vmssVMCache.Delete(cacheKey) } } @@ -142,6 +144,7 @@ func (ss *ScaleSet) getVMSSVMsFromCache(ctx context.Context, resourceGroup, vmss // newVMSSVirtualMachinesCache instantiates a new VMs cache for VMs belonging to the provided VMSS. func (ss *ScaleSet) newVMSSVirtualMachinesCache() (azcache.Resource, error) { + logger := log.Background().WithName("newVMSSVirtualMachinesCache") vmssVirtualMachinesCacheTTL := time.Duration(ss.VmssVirtualMachinesCacheTTLInSeconds) * time.Second getter := func(_ context.Context, cacheKey string) (interface{}, error) { @@ -201,7 +204,7 @@ func (ss *ScaleSet) newVMSSVirtualMachinesCache() (azcache.Resource, error) { // set cache entry to nil when the VM is under deleting. 
if vm.Properties != nil && strings.EqualFold(ptr.Deref(vm.Properties.ProvisioningState, ""), string(consts.ProvisioningStateDeleting)) { - klog.V(4).Infof("VMSS virtualMachine %q is under deleting, setting its cache to nil", computerName) + logger.V(4).Info("VMSS virtualMachine is under deleting, setting its cache to nil", "VM", computerName) vmssVMCacheEntry.VirtualMachine = nil } localCache.Store(computerName, vmssVMCacheEntry) @@ -218,7 +221,7 @@ func (ss *ScaleSet) newVMSSVirtualMachinesCache() (azcache.Resource, error) { // if the nil cache entry has existed for vmssVirtualMachinesCacheTTL in the cache // then it should not be added back to the cache if vmEntry.VirtualMachine == nil && time.Since(vmEntry.LastUpdate) > vmssVirtualMachinesCacheTTL { - klog.V(5).Infof("ignoring expired entries from old cache for %s", name) + logger.V(5).Info("ignoring expired entries from old cache", "name", name) continue } LastUpdate := time.Now().UTC() @@ -228,7 +231,7 @@ func (ss *ScaleSet) newVMSSVirtualMachinesCache() (azcache.Resource, error) { LastUpdate = vmEntry.LastUpdate } - klog.V(5).Infof("adding old entries to new cache for %s", name) + logger.V(5).Info("adding old entries to new cache", "name", name) localCache.Store(name, &VMSSVirtualMachineEntry{ ResourceGroup: vmEntry.ResourceGroup, VMSSName: vmEntry.VMSSName, @@ -247,6 +250,7 @@ func (ss *ScaleSet) newVMSSVirtualMachinesCache() (azcache.Resource, error) { // DeleteCacheForNode deletes Node from VMSS VM and VM caches. func (ss *ScaleSet) DeleteCacheForNode(ctx context.Context, nodeName string) error { + logger := log.Background().WithName("DeleteCacheForNode") if ss.DisableAPICallCache { return nil } @@ -283,11 +287,12 @@ func (ss *ScaleSet) DeleteCacheForNode(ctx context.Context, nodeName string) err virtualMachines.Delete(nodeName) ss.vmssVMCache.Update(cacheKey, virtualMachines) - klog.V(2).Infof("DeleteCacheForNode(%s, %s, %s) successfully", node.resourceGroup, node.vmssName, nodeName) + logger.V(2).Info("successfully deleted cache for node", "resourceGroup", node.resourceGroup, "vmssName", node.vmssName, "node", nodeName) return nil } func (ss *ScaleSet) updateCache(ctx context.Context, nodeName, resourceGroupName, vmssName, instanceID string, updatedVM *armcompute.VirtualMachineScaleSetVM) error { + logger := log.Background().WithName("updateCache") if nodeName == "" { return fmt.Errorf("updateCache(%s, %s, %s) failed with empty nodeName", vmssName, resourceGroupName, nodeName) } @@ -325,12 +330,13 @@ func (ss *ScaleSet) updateCache(ctx context.Context, nodeName, resourceGroupName }) ss.vmssVMCache.Update(cacheKey, localCache) - klog.V(2).Infof("updateCache(%s, %s, %s) for cacheKey(%s) updated successfully", vmssName, resourceGroupName, nodeName, cacheKey) + logger.V(2).Info("updated successfully", "vmssName", vmssName, "resourceGroupName", resourceGroupName, "node", nodeName, "cacheKey", cacheKey) return nil } func (ss *ScaleSet) newNonVmssUniformNodesCache() (azcache.Resource, error) { getter := func(ctx context.Context, _ string) (interface{}, error) { + logger := log.Background().WithName("newNonVmssUniformNodesCache") vmssFlexVMNodeNames := utilsets.NewString() vmssFlexVMProviderIDs := utilsets.NewString() avSetVMNodeNames := utilsets.NewString() @@ -339,7 +345,7 @@ func (ss *ScaleSet) newNonVmssUniformNodesCache() (azcache.Resource, error) { if err != nil { return nil, err } - klog.V(2).Infof("refresh the cache of NonVmssUniformNodesCache in rg %v", resourceGroups) + logger.V(2).Info("refresh the cache of 
NonVmssUniformNodesCache", "resourceGroups", resourceGroups) for _, resourceGroup := range resourceGroups.UnsortedList() { vms, err := ss.ListVirtualMachines(ctx, resourceGroup) @@ -387,6 +393,7 @@ func (ss *ScaleSet) newNonVmssUniformNodesCache() (azcache.Resource, error) { } func (ss *ScaleSet) getVMManagementTypeByNodeName(ctx context.Context, nodeName string, crt azcache.AzureCacheReadType) (VMManagementType, error) { + logger := log.Background().WithName("getVMManagementTypeByNodeName") if ss.DisableAvailabilitySetNodes && !ss.EnableVmssFlexNodes { return ManagedByVmssUniform, nil } @@ -422,7 +429,7 @@ func (ss *ScaleSet) getVMManagementTypeByNodeName(ctx context.Context, nodeName return ManagedByVmssUniform, nil } - klog.V(2).Infof("Node %s has joined the cluster since the last VM cache refresh in NonVmssUniformNodesEntry, refreshing the cache", nodeName) + logger.V(2).Info("Node has joined the cluster since the last VM cache refresh in NonVmssUniformNodesEntry, refreshing the cache", "node", nodeName) cached, err = ss.nonVmssUniformNodesCache.Get(ctx, consts.NonVmssUniformNodesKey, azcache.CacheReadTypeForceRefresh) if err != nil { return ManagedByUnknownVMSet, err diff --git a/pkg/provider/azure_vmss_repo.go b/pkg/provider/azure_vmss_repo.go index 3b0697b1d1..0a065937b9 100644 --- a/pkg/provider/azure_vmss_repo.go +++ b/pkg/provider/azure_vmss_repo.go @@ -23,23 +23,25 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) // CreateOrUpdateVMSS invokes az.ComputeClientFactory.GetVirtualMachineScaleSetClient().Update(). func (az *Cloud) CreateOrUpdateVMSS(resourceGroupName string, VMScaleSetName string, parameters armcompute.VirtualMachineScaleSet) error { + logger := log.Background().WithName("CreateOrUpdateVMSS") ctx, cancel := getContextWithCancel() defer cancel() // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. - klog.V(3).Infof("CreateOrUpdateVMSS: verify the status of the vmss being created or updated") + logger.V(3).Info("verify the status of the vmss being created or updated") vmss, err := az.ComputeClientFactory.GetVirtualMachineScaleSetClient().Get(ctx, resourceGroupName, VMScaleSetName, nil) if err != nil { klog.Errorf("CreateOrUpdateVMSS: error getting vmss(%s): %v", VMScaleSetName, err) return err } if vmss.Properties.ProvisioningState != nil && strings.EqualFold(*vmss.Properties.ProvisioningState, consts.ProvisionStateDeleting) { - klog.V(3).Infof("CreateOrUpdateVMSS: found vmss %s being deleted, skipping", VMScaleSetName) + logger.V(3).Info("found vmss being deleted, skipping", "vmss", VMScaleSetName) return nil } diff --git a/pkg/provider/azure_vmssflex.go b/pkg/provider/azure_vmssflex.go index d58628006c..0b4c21a331 100644 --- a/pkg/provider/azure_vmssflex.go +++ b/pkg/provider/azure_vmssflex.go @@ -39,6 +39,7 @@ import ( azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache" "sigs.k8s.io/cloud-provider-azure/pkg/consts" + "sigs.k8s.io/cloud-provider-azure/pkg/log" "sigs.k8s.io/cloud-provider-azure/pkg/metrics" "sigs.k8s.io/cloud-provider-azure/pkg/util/lockmap" vmutil "sigs.k8s.io/cloud-provider-azure/pkg/util/vm" @@ -274,6 +275,7 @@ func (fs *FlexScaleSet) GetProvisioningStateByNodeName(ctx context.Context, name // GetPowerStatusByNodeName returns the powerState for the specified node. 
 func (fs *FlexScaleSet) GetPowerStatusByNodeName(ctx context.Context, name string) (powerState string, err error) {
+	logger := log.Background().WithName("fs.GetPowerStatusByNodeName")
 	vm, err := fs.getVmssFlexVM(ctx, name, azcache.CacheReadTypeDefault)
 	if err != nil {
 		return powerState, err
@@ -284,7 +286,7 @@ func (fs *FlexScaleSet) GetPowerStatusByNodeName(ctx context.Context, name strin
 	}
 	// vm.Properties.InstanceView or vm.Properties.InstanceView.Statuses are nil when the VM is under deleting.
-	klog.V(3).Infof("InstanceView for node %q is nil, assuming it's deleting", name)
+	logger.V(3).Info("InstanceView for node is nil, assuming it's deleting", "node", name)
 	return consts.VMPowerStateUnknown, nil
 }
@@ -448,6 +450,7 @@ func (fs *FlexScaleSet) GetNodeCIDRMasksByProviderID(ctx context.Context, provid
 // EnsureHostInPool ensures the given VM's Primary NIC's Primary IP Configuration is
 // participating in the specified LoadBalancer Backend Pool, which returns (resourceGroup, vmasName, instanceID, vmssVM, error).
 func (fs *FlexScaleSet) EnsureHostInPool(ctx context.Context, service *v1.Service, nodeName types.NodeName, backendPoolID string, vmSetNameOfLB string) (string, string, string, *armcompute.VirtualMachineScaleSetVM, error) {
+	logger := log.Background().WithName("EnsureHostInPool")
 	serviceName := getServiceName(service)
 	name := mapNodeNameToVMName(nodeName)
 	vmssFlexName, err := fs.getNodeVmssFlexName(ctx, name)
@@ -466,7 +469,7 @@ func (fs *FlexScaleSet) EnsureHostInPool(ctx context.Context, service *v1.Servic
 		return "", "", "", nil, fmt.Errorf("EnsureHostInPool: VMSS Flex does not support Basic Load Balancer")
 	}
 	if vmSetNameOfLB != "" && needCheck && !strings.EqualFold(vmSetNameOfLB, vmssFlexName) {
-		klog.V(3).Infof("EnsureHostInPool skips node %s because it is not in the ScaleSet %s", name, vmSetNameOfLB)
+		logger.V(3).Info("skips node because it is not in the ScaleSet", "node", name, "ScaleSet", vmSetNameOfLB)
 		return "", "", "", nil, errNotInVMSet
 	}
@@ -527,7 +530,7 @@ func (fs *FlexScaleSet) EnsureHostInPool(ctx context.Context, service *v1.Servic
 			return "", "", "", nil, err
 		}
 		if !isSameLB {
-			klog.V(4).Infof("Node %q has already been added to LB %q, omit adding it to a new one", nodeName, oldLBName)
+			logger.V(4).Info("Node has already been added to LB, omit adding it to a new one", "node", nodeName, "LB", oldLBName)
 			return "", "", "", nil, nil
 		}
 	}
@@ -540,7 +543,7 @@ func (fs *FlexScaleSet) EnsureHostInPool(ctx context.Context, service *v1.Servic
 	primaryIPConfig.Properties.LoadBalancerBackendAddressPools = newBackendPools
 	nicName := *nic.Name
-	klog.V(3).Infof("nicupdate(%s): nic(%s) - updating", serviceName, nicName)
+	logger.V(3).Info("updating nic", "service", serviceName, "nic", nicName)
 	err = fs.CreateOrUpdateInterface(ctx, service, nic)
 	if err != nil {
 		return "", "", "", nil, err
@@ -556,7 +559,7 @@ func (fs *FlexScaleSet) EnsureHostInPool(ctx context.Context, service *v1.Servic
 }
 func (fs *FlexScaleSet) ensureVMSSFlexInPool(ctx context.Context, _ *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error {
-	klog.V(2).Infof("ensureVMSSFlexInPool: ensuring VMSS Flex with backendPoolID %s", backendPoolID)
+	logger := log.Background().WithName("ensureVMSSFlexInPool")
+	logger.V(2).Info("ensuring VMSS Flex with backendPoolID", "backendPoolID", backendPoolID)
 	vmssFlexIDsMap := make(map[string]bool)
 	if !fs.UseStandardLoadBalancer() {
@@ -577,7 +581,7 @@ func (fs *FlexScaleSet) ensureVMSSFlexInPool(ctx context.Context, _ *v1.Service,
 			return err
 		}
 		if
shouldExcludeLoadBalancer { - klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", node.Name) + logger.V(4).Info("Excluding unmanaged/external-resource-group node", "node", node.Name) continue } @@ -607,7 +611,7 @@ func (fs *FlexScaleSet) ensureVMSSFlexInPool(ctx context.Context, _ *v1.Service, vmssFlexIDsMap[vmssFlexID] = true } - klog.V(2).Infof("ensureVMSSFlexInPool begins to update VMSS list %v with backendPoolID %s", vmssFlexIDsMap, backendPoolID) + logger.V(2).Info("begins to update VMSS list with backendPoolID", "VMSS list", vmssFlexIDsMap, "backendPoolID", backendPoolID) for vmssFlexID := range vmssFlexIDsMap { vmssFlex, err := fs.getVmssFlexByVmssFlexID(ctx, vmssFlexID, azcache.CacheReadTypeDefault) if err != nil { @@ -618,12 +622,12 @@ func (fs *FlexScaleSet) ensureVMSSFlexInPool(ctx context.Context, _ *v1.Service, // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. if vmssFlex.Properties.ProvisioningState != nil && strings.EqualFold(*vmssFlex.Properties.ProvisioningState, consts.ProvisionStateDeleting) { - klog.V(3).Infof("ensureVMSSFlexInPool: found vmss %s being deleted, skipping", vmssFlexID) + logger.V(3).Info("found vmss being deleted, skipping", "vmss", vmssFlexID) continue } if vmssFlex.Properties.VirtualMachineProfile == nil || vmssFlex.Properties.VirtualMachineProfile.NetworkProfile == nil || vmssFlex.Properties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil { - klog.V(4).Infof("ensureVMSSFlexInPool: cannot obtain the primary network interface configuration of vmss %s, just skip it as it might not have default vm profile", vmssFlexID) + logger.V(4).Info("cannot obtain the primary network interface configuration of vmss, just skip it as it might not have default vm profile", "vmss", vmssFlexID) continue } vmssNIC := vmssFlex.Properties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations @@ -668,7 +672,7 @@ func (fs *FlexScaleSet) ensureVMSSFlexInPool(ctx context.Context, _ *v1.Service, return err } if !isSameLB { - klog.V(4).Infof("VMSS %q has already been added to LB %q, omit adding it to a new one", vmssFlexID, oldLBName) + logger.V(4).Info("VMSS has already been added to LB, omit adding it to a new one", "vmss", vmssFlexID, "LB", oldLBName) return nil } } @@ -695,7 +699,7 @@ func (fs *FlexScaleSet) ensureVMSSFlexInPool(ctx context.Context, _ *v1.Service, _ = fs.vmssFlexCache.Delete(consts.VmssFlexKey) }() - klog.V(2).Infof("ensureVMSSFlexInPool begins to add vmss(%s) with new backendPoolID %s", vmssFlexName, backendPoolID) + logger.V(2).Info("begins to add vmss with new backendPoolID", "vmss", vmssFlexName, "backendPoolID", backendPoolID) rerr := fs.CreateOrUpdateVMSS(fs.ResourceGroup, vmssFlexName, newVMSS) if rerr != nil { klog.Errorf("ensureVMSSFlexInPool CreateOrUpdateVMSS(%s) with new backendPoolID %s, err: %v", vmssFlexName, backendPoolID, err) @@ -708,6 +712,7 @@ func (fs *FlexScaleSet) ensureVMSSFlexInPool(ctx context.Context, _ *v1.Service, // EnsureHostsInPool ensures the given Node's primary IP configurations are // participating in the specified LoadBalancer Backend Pool. 
func (fs *FlexScaleSet) EnsureHostsInPool(ctx context.Context, service *v1.Service, nodes []*v1.Node, backendPoolID string, vmSetNameOfLB string) error { + logger := log.Background().WithName("EnsureHostsInPool") mc := metrics.NewMetricContext("services", "vmssflex_ensure_hosts_in_pool", fs.ResourceGroup, fs.SubscriptionID, getServiceName(service)) isOperationSucceeded := false defer func() { @@ -723,7 +728,7 @@ func (fs *FlexScaleSet) EnsureHostsInPool(ctx context.Context, service *v1.Servi for _, node := range nodes { localNodeName := node.Name if fs.UseStandardLoadBalancer() && fs.ExcludeMasterNodesFromStandardLB() && isControlPlaneNode(node) { - klog.V(4).Infof("Excluding master node %q from load balancer backendpool %q", localNodeName, backendPoolID) + logger.V(4).Info("Excluding master node from load balancer backendpool", "node", localNodeName, "backendPoolID", backendPoolID) continue } @@ -733,7 +738,7 @@ func (fs *FlexScaleSet) EnsureHostsInPool(ctx context.Context, service *v1.Servi return err } if shouldExcludeLoadBalancer { - klog.V(4).Infof("Excluding unmanaged/external-resource-group node %q", localNodeName) + logger.V(4).Info("Excluding unmanaged/external-resource-group node", "node", localNodeName) continue } @@ -778,6 +783,7 @@ func (fs *FlexScaleSet) ensureBackendPoolDeletedFromVmssFlex(ctx context.Context // EnsureBackendPoolDeletedFromVMSets ensures the loadBalancer backendAddressPools deleted from the specified VMSS Flex func (fs *FlexScaleSet) EnsureBackendPoolDeletedFromVMSets(ctx context.Context, vmssNamesMap map[string]bool, backendPoolIDs []string) error { + logger := log.Background().WithName("fs.EnsureBackendPoolDeletedFromVMSets") vmssUpdaters := make([]func() error, 0, len(vmssNamesMap)) errors := make([]error, 0, len(vmssNamesMap)) for vmssName := range vmssNamesMap { @@ -792,11 +798,11 @@ func (fs *FlexScaleSet) EnsureBackendPoolDeletedFromVMSets(ctx context.Context, // When vmss is being deleted, CreateOrUpdate API would report "the vmss is being deleted" error. // Since it is being deleted, we shouldn't send more CreateOrUpdate requests for it. 
if vmss.Properties.ProvisioningState != nil && strings.EqualFold(*vmss.Properties.ProvisioningState, consts.ProvisionStateDeleting) { - klog.V(3).Infof("fs.EnsureBackendPoolDeletedFromVMSets: found vmss %s being deleted, skipping", vmssName) + logger.V(3).Info("found vmss being deleted, skipping", "vmss", vmssName) continue } if vmss.Properties.VirtualMachineProfile == nil || vmss.Properties.VirtualMachineProfile.NetworkProfile == nil || vmss.Properties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations == nil { - klog.V(4).Infof("fs.EnsureBackendPoolDeletedFromVMSets: cannot obtain the primary network interface configurations, of vmss %s", vmssName) + logger.V(4).Info("cannot obtain the primary network interface configurations, of vmss", "vmss", vmssName) continue } vmssNIC := vmss.Properties.VirtualMachineProfile.NetworkProfile.NetworkInterfaceConfigurations @@ -839,7 +845,7 @@ func (fs *FlexScaleSet) EnsureBackendPoolDeletedFromVMSets(ctx context.Context, _ = fs.vmssFlexCache.Delete(consts.VmssFlexKey) }() - klog.V(2).Infof("fs.EnsureBackendPoolDeletedFromVMSets begins to delete backendPoolIDs %q from vmss(%s)", backendPoolIDs, vmssName) + logger.V(2).Info("begins to delete backendPoolIDs from vmss", "backendPoolIDs", backendPoolIDs, "vmss", vmssName) rerr := fs.CreateOrUpdateVMSS(fs.ResourceGroup, vmssName, newVMSS) if rerr != nil { klog.Errorf("fs.EnsureBackendPoolDeletedFromVMSets CreateOrUpdateVMSS(%s) for backendPoolIDs %q, err: %v", vmssName, backendPoolIDs, rerr) @@ -864,6 +870,7 @@ func (fs *FlexScaleSet) EnsureBackendPoolDeletedFromVMSets(ctx context.Context, // EnsureBackendPoolDeleted ensures the loadBalancer backendAddressPools deleted from the specified nodes. func (fs *FlexScaleSet) EnsureBackendPoolDeleted(ctx context.Context, service *v1.Service, backendPoolIDs []string, vmSetName string, backendAddressPools []*armnetwork.BackendAddressPool, deleteFromVMSet bool) (bool, error) { + logger := log.Background().WithName("EnsureBackendPoolDeleted") // Returns nil if backend address pools already deleted. 
 	if backendAddressPools == nil {
 		return false, nil
@@ -920,7 +927,7 @@ func (fs *FlexScaleSet) EnsureBackendPoolDeleted(ctx context.Context, service *v
 		}
 	}
-	klog.V(2).Infof("Ensure backendPoolIDs %q deleted from the VMSS.", backendPoolIDs)
+	logger.V(2).Info("Ensure backendPoolIDs deleted from the VMSS", "backendPoolIDs", backendPoolIDs)
 	if deleteFromVMSet {
 		err := fs.ensureBackendPoolDeletedFromVmssFlex(ctx, backendPoolIDs, vmSetName)
 		if err != nil {
@@ -928,10 +935,10 @@ func (fs *FlexScaleSet) EnsureBackendPoolDeleted(ctx context.Context, service *v
 		}
 	}
-	klog.V(2).Infof("Ensure backendPoolIDs %q deleted from the VMSS VMs.", backendPoolIDs)
-	klog.V(2).Infof("go into fs.ensureBackendPoolDeletedFromNode, vmssFlexVMNameMap: %s, size: %d", vmssFlexVMNameMap, len(vmssFlexVMNameMap))
+	logger.V(2).Info("Ensure backendPoolIDs deleted from the VMSS VMs", "backendPoolIDs", backendPoolIDs)
+	logger.V(2).Info("go into fs.ensureBackendPoolDeletedFromNode", "vmssFlexVMNameMap", vmssFlexVMNameMap, "size", len(vmssFlexVMNameMap))
 	nicUpdated, err := fs.ensureBackendPoolDeletedFromNode(ctx, vmssFlexVMNameMap, backendPoolIDs)
-	klog.V(2).Infof("exit from fs.ensureBackendPoolDeletedFromNode")
+	logger.V(2).Info("exit from fs.ensureBackendPoolDeletedFromNode")
 	if err != nil {
 		allErrs = append(allErrs, err)
 	}
@@ -945,6 +952,7 @@ func (fs *FlexScaleSet) EnsureBackendPoolDeleted(ctx context.Context, service *v
 }
 func (fs *FlexScaleSet) ensureBackendPoolDeletedFromNode(ctx context.Context, vmssFlexVMNameMap map[string]string, backendPoolIDs []string) (bool, error) {
+	logger := log.Background().WithName("ensureBackendPoolDeletedFromNode")
 	nicUpdaters := make([]func() error, 0)
 	allErrs := make([]error, 0)
 	nics := map[string]*armnetwork.Interface{} // nicName -> nic
@@ -993,18 +1001,18 @@ func (fs *FlexScaleSet) ensureBackendPoolDeletedFromNode(ctx context.Context, vm
 		nic.Properties.IPConfigurations = newIPConfigs
 		nicUpdaters = append(nicUpdaters, func() error {
-			klog.V(2).Infof("EnsureBackendPoolDeleted begins to CreateOrUpdate for NIC(%s, %s) with backendPoolIDs %q", fs.ResourceGroup, ptr.Deref(nic.Name, ""), backendPoolIDs)
+			logger.V(2).Info("begins to CreateOrUpdate for NIC with backendPoolIDs", "resourceGroup", fs.ResourceGroup, "nicName", ptr.Deref(nic.Name, ""), "backendPoolIDs", backendPoolIDs)
 			_, rerr := fs.ComputeClientFactory.GetInterfaceClient().CreateOrUpdate(ctx, fs.ResourceGroup, ptr.Deref(nic.Name, ""), *nic)
 			if rerr != nil {
 				klog.Errorf("EnsureBackendPoolDeleted CreateOrUpdate for NIC(%s, %s) failed with error %v", fs.ResourceGroup, ptr.Deref(nic.Name, ""), rerr.Error())
 				return rerr
 			}
 			nicUpdated.Store(true)
-			klog.V(2).Infof("EnsureBackendPoolDeleted done")
+			logger.V(2).Info("done")
 			return nil
 		})
 	}
-	klog.V(2).Infof("nicUpdaters size: %d", len(nicUpdaters))
+	logger.V(2).Info("nicUpdaters", "size", len(nicUpdaters))
 	errs := utilerrors.AggregateGoroutines(nicUpdaters...)
 	if errs != nil {
 		allErrs = append(allErrs, utilerrors.Flatten(errs))
diff --git a/pkg/provider/azure_vmssflex_cache.go b/pkg/provider/azure_vmssflex_cache.go
index 9ffb30a995..c9ffe1fcbb 100644
--- a/pkg/provider/azure_vmssflex_cache.go
+++ b/pkg/provider/azure_vmssflex_cache.go
@@ -32,6 +32,7 @@ import (
 	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
 	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/log"
 	"sigs.k8s.io/cloud-provider-azure/pkg/util/errutils"
 )
@@ -131,6 +132,7 @@ func (fs *FlexScaleSet) newVmssFlexVMCache() (azcache.Resource, error) {
 }
 func (fs *FlexScaleSet) getNodeNameByVMName(ctx context.Context, vmName string) (string, error) {
+	logger := log.Background().WithName("getNodeNameByVMName")
 	fs.lockMap.LockEntry(consts.GetNodeVmssFlexIDLockKey)
 	defer fs.lockMap.UnlockEntry(consts.GetNodeVmssFlexIDLockKey)
 	cachedNodeName, isCached := fs.vmssFlexVMNameToNodeName.Load(vmName)
@@ -163,7 +165,7 @@ func (fs *FlexScaleSet) getNodeNameByVMName(ctx context.Context, vmName string)
 	nodeName, err := getter(ctx, vmName, azcache.CacheReadTypeDefault)
 	if errors.Is(err, cloudprovider.InstanceNotFound) {
-		klog.V(2).Infof("Could not find node (%s) in the existing cache. Forcely freshing the cache to check again...", nodeName)
+		logger.V(2).Info("Could not find node in the existing cache. Forcibly refreshing the cache to check again...", "node", nodeName)
 		return getter(ctx, vmName, azcache.CacheReadTypeForceRefresh)
 	}
 	return nodeName, err
@@ -171,6 +173,7 @@ func (fs *FlexScaleSet) getNodeNameByVMName(ctx context.Context, vmName string)
 }
 func (fs *FlexScaleSet) getNodeVmssFlexID(ctx context.Context, nodeName string) (string, error) {
+	logger := log.Background().WithName("getNodeVmssFlexID")
 	fs.lockMap.LockEntry(consts.GetNodeVmssFlexIDLockKey)
 	defer fs.lockMap.UnlockEntry(consts.GetNodeVmssFlexIDLockKey)
 	cachedVmssFlexID, isCached := fs.vmssFlexVMNameToVmssID.Load(nodeName)
@@ -221,7 +224,7 @@ func (fs *FlexScaleSet) getNodeVmssFlexID(ctx context.Context, nodeName string)
 	vmssFlexID, err := getter(ctx, nodeName, azcache.CacheReadTypeDefault)
 	if errors.Is(err, cloudprovider.InstanceNotFound) {
-		klog.V(2).Infof("Could not find node (%s) in the existing cache. Forcely freshing the cache to check again...", nodeName)
+		logger.V(2).Info("Could not find node in the existing cache. Forcibly refreshing the cache to check again...", "node", nodeName)
 		return getter(ctx, nodeName, azcache.CacheReadTypeForceRefresh)
 	}
 	return vmssFlexID, err
@@ -229,6 +232,7 @@ func (fs *FlexScaleSet) getNodeVmssFlexID(ctx context.Context, nodeName string)
 }
 func (fs *FlexScaleSet) getVmssFlexVM(ctx context.Context, nodeName string, crt azcache.AzureCacheReadType) (vm *armcompute.VirtualMachine, err error) {
+	logger := log.Background().WithName("getVmssFlexVM")
 	vmssFlexID, err := fs.getNodeVmssFlexID(ctx, nodeName)
 	if err != nil {
 		return vm, err
@@ -241,7 +245,7 @@ func (fs *FlexScaleSet) getVmssFlexVM(ctx context.Context, nodeName string, crt
 	vmMap := cached.(*sync.Map)
 	cachedVM, ok := vmMap.Load(nodeName)
 	if !ok {
-		klog.V(2).Infof("did not find node (%s) in the existing cache, which means it is deleted...", nodeName)
+		logger.V(2).Info("did not find node in the existing cache, which means it is deleted...", "node", nodeName)
 		return vm, cloudprovider.InstanceNotFound
 	}
@@ -249,6 +253,7 @@ func (fs *FlexScaleSet) getVmssFlexVM(ctx context.Context, nodeName string, crt
 }
 func (fs *FlexScaleSet) getVmssFlexByVmssFlexID(ctx context.Context, vmssFlexID string, crt azcache.AzureCacheReadType) (*armcompute.VirtualMachineScaleSet, error) {
+	logger := log.Background().WithName("getVmssFlexByVmssFlexID")
 	cached, err := fs.vmssFlexCache.Get(ctx, consts.VmssFlexKey, crt)
 	if err != nil {
 		return nil, err
@@ -259,7 +264,7 @@ func (fs *FlexScaleSet) getVmssFlexByVmssFlexID(ctx context.Context, vmssFlexID
 		return result, nil
 	}
-	klog.V(2).Infof("Couldn't find VMSS Flex with ID %s, refreshing the cache", vmssFlexID)
+	logger.V(2).Info("Couldn't find VMSS Flex, refreshing the cache", "vmssFlexID", vmssFlexID)
 	cached, err = fs.vmssFlexCache.Get(ctx, consts.VmssFlexKey, azcache.CacheReadTypeForceRefresh)
 	if err != nil {
 		return nil, err
@@ -337,6 +342,7 @@ func (fs *FlexScaleSet) getVmssFlexByName(ctx context.Context, vmssFlexName stri
 }
 func (fs *FlexScaleSet) DeleteCacheForNode(ctx context.Context, nodeName string) error {
+	logger := log.Background().WithName("DeleteCacheForNode")
 	if fs.DisableAPICallCache {
 		return nil
 	}
@@ -364,6 +370,6 @@ func (fs *FlexScaleSet) DeleteCacheForNode(ctx context.Context, nodeName string)
 	fs.vmssFlexVMCache.Update(vmssFlexID, vmMap)
 	fs.vmssFlexVMNameToVmssID.Delete(nodeName)
-	klog.V(2).Infof("DeleteCacheForNode(%s, %s) successfully", vmssFlexID, nodeName)
+	logger.V(2).Info("successfully deleted cache for node", "vmssFlexID", vmssFlexID, "node", nodeName)
 	return nil
 }
diff --git a/pkg/provider/azure_zones.go b/pkg/provider/azure_zones.go
index c9de849d70..76457795f3 100644
--- a/pkg/provider/azure_zones.go
+++ b/pkg/provider/azure_zones.go
@@ -31,21 +31,24 @@ import (
 	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
 	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/log"
 )
 var _ cloudprovider.Zones = (*Cloud)(nil)
 func (az *Cloud) refreshZones(ctx context.Context, refreshFunc func(ctx context.Context) error) {
-	klog.V(2).Info("refreshZones: refreshing zones every 30 minutes.")
+	logger := log.Background().WithName("refreshZones")
+	logger.V(2).Info("refreshing zones every 30 minutes")
 	err := wait.PollUntilContextCancel(ctx, consts.ZoneFetchingInterval, false, func(ctx context.Context) (bool, error) {
 		_ = refreshFunc(ctx)
 		return false, nil
 	})
-	klog.V(2).Infof("refreshZones: refresh zones finished with error: %s", err.Error())
+	logger.V(2).Error(err, "refresh zones finished with error")
 }
 func (az *Cloud) syncRegionZonesMap(ctx context.Context)
error { - klog.V(2).Infof("syncRegionZonesMap: starting to fetch all available zones for the subscription %s", az.SubscriptionID) + logger := log.Background().WithName("syncRegionZonesMap") + logger.V(2).Info("starting to fetch all available zones for the subscription", "subscriptionID", az.SubscriptionID) zones, err := az.zoneRepo.ListZones(ctx) if err != nil { return fmt.Errorf("list zones: %w", err) @@ -74,10 +77,11 @@ func (az *Cloud) updateRegionZonesMap(zones map[string][]string) { } func (az *Cloud) getRegionZonesBackoff(ctx context.Context, region string) ([]*string, error) { + logger := log.Background().WithName("getRegionZonesBackoff") if az.IsStackCloud() { // Azure Stack does not support zone at the moment // https://docs.microsoft.com/en-us/azure-stack/user/azure-stack-network-differences?view=azs-2102 - klog.V(3).Infof("getRegionZonesMapWrapper: Azure Stack does not support Zones at the moment, skipping") + logger.V(3).Info("Azure Stack does not support Zones at the moment, skipping") return to.SliceOfPtrs(az.regionZonesMap[region]...), nil } @@ -88,7 +92,7 @@ func (az *Cloud) getRegionZonesBackoff(ctx context.Context, region string) ([]*s return to.SliceOfPtrs(az.regionZonesMap[region]...), nil } - klog.V(2).Infof("getRegionZonesMapWrapper: the region-zones map is not initialized successfully, retrying immediately") + logger.V(2).Info("the region-zones map is not initialized successfully, retrying immediately") var ( zones map[string][]string @@ -144,6 +148,7 @@ func (az *Cloud) GetZoneID(zoneLabel string) string { // This interface will not be called if InstancesV2 is enabled. // If the node is not running with availability zones, then it will fall back to fault domain. func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { + logger := log.Background().WithName("GetZone") if az.UseInstanceMetadata { metadata, err := az.Metadata.GetMetadata(ctx, azcache.CacheReadTypeUnsafe) if err != nil { @@ -164,7 +169,7 @@ func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { } zone = az.makeZone(location, zoneID) } else { - klog.V(3).Infof("Availability zone is not enabled for the node, falling back to fault domain") + logger.V(3).Info("Availability zone is not enabled for the node, falling back to fault domain") zone = metadata.Compute.FaultDomain } @@ -190,13 +195,14 @@ func (az *Cloud) GetZone(ctx context.Context) (cloudprovider.Zone, error) { // DEPRECATED: Zones is deprecated in favor of retrieving zone/region information from InstancesV2. // This interface will not be called if InstancesV2 is enabled. func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cloudprovider.Zone, error) { + logger := log.Background().WithName("GetZoneByProviderID") if providerID == "" { return cloudprovider.Zone{}, errNodeNotInitialized } // Returns nil for unmanaged nodes because azure cloud provider couldn't fetch information for them. if az.IsNodeUnmanagedByProviderID(providerID) { - klog.V(2).Infof("GetZoneByProviderID: omitting unmanaged node %q", providerID) + logger.V(2).Info("omitting unmanaged node", "providerID", providerID) return cloudprovider.Zone{}, nil } @@ -214,13 +220,14 @@ func (az *Cloud) GetZoneByProviderID(ctx context.Context, providerID string) (cl // DEPRECATED: Zones is deprecated in favor of retrieving zone/region information from InstancesV2. // This interface will not be called if InstancesV2 is enabled. 
 func (az *Cloud) GetZoneByNodeName(ctx context.Context, nodeName types.NodeName) (cloudprovider.Zone, error) {
+	logger := log.Background().WithName("GetZoneByNodeName")
 	// Returns "" for unmanaged nodes because azure cloud provider couldn't fetch information for them.
 	unmanaged, err := az.IsNodeUnmanaged(string(nodeName))
 	if err != nil {
 		return cloudprovider.Zone{}, err
 	}
 	if unmanaged {
-		klog.V(2).Infof("GetZoneByNodeName: omitting unmanaged node %q", nodeName)
+		logger.V(2).Info("omitting unmanaged node", "node", nodeName)
 		return cloudprovider.Zone{}, nil
 	}
diff --git a/pkg/provider/securitygroup/azure_securitygroup_repo.go b/pkg/provider/securitygroup/azure_securitygroup_repo.go
index 0c602b8ee2..814c015583 100644
--- a/pkg/provider/securitygroup/azure_securitygroup_repo.go
+++ b/pkg/provider/securitygroup/azure_securitygroup_repo.go
@@ -34,6 +34,7 @@ import (
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/securitygroupclient"
 	azcache "sigs.k8s.io/cloud-provider-azure/pkg/cache"
 	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/log"
 	"sigs.k8s.io/cloud-provider-azure/pkg/util/errutils"
 )
@@ -55,6 +56,7 @@ type securityGroupRepo struct {
 }
 func NewSecurityGroupRepo(securityGroupResourceGroup string, securityGroupName string, nsgCacheTTLInSeconds int, disableAPICallCache bool, securityGroupClient securitygroupclient.Interface) (Repository, error) {
+	logger := log.Background().WithName("NewSecurityGroupRepo")
 	getter := func(ctx context.Context, key string) (interface{}, error) {
 		nsg, err := securityGroupClient.Get(ctx, securityGroupResourceGroup, key)
 		exists, rerr := errutils.CheckResourceExistsFromAzcoreError(err)
@@ -63,7 +65,7 @@ func NewSecurityGroupRepo(securityGroupResourceGroup string, securityGroupName s
 		}
 		if !exists {
-			klog.V(2).Infof("Security group %q not found", key)
+			logger.V(2).Info("Security group not found", "securityGroup", key)
 			return nil, nil
 		}
@@ -90,8 +92,9 @@ func NewSecurityGroupRepo(securityGroupResourceGroup string, securityGroupName s
 // CreateOrUpdateSecurityGroup invokes az.SecurityGroupsClient.CreateOrUpdate with exponential backoff retry
 func (az *securityGroupRepo) CreateOrUpdateSecurityGroup(ctx context.Context, sg *armnetwork.SecurityGroup) error {
+	logger := log.Background().WithName("CreateOrUpdateSecurityGroup")
 	_, rerr := az.securigyGroupClient.CreateOrUpdate(ctx, az.securityGroupResourceGroup, *sg.Name, *sg)
-	klog.V(10).Infof("SecurityGroupsClient.CreateOrUpdate(%s): end", *sg.Name)
+	logger.V(10).Info("SecurityGroupsClient.CreateOrUpdate: end", "securityGroupName", *sg.Name)
 	if rerr == nil {
 		// Invalidate the cache right after updating
 		_ = az.nsgCache.Delete(*sg.Name)
@@ -104,13 +107,13 @@ func (az *securityGroupRepo) CreateOrUpdateSecurityGroup(ctx context.Context, sg
 		// Invalidate the cache because ETAG precondition mismatch.
 		if respError.StatusCode == http.StatusPreconditionFailed {
-			klog.V(3).Infof("SecurityGroup cache for %s is cleanup because of http.StatusPreconditionFailed", *sg.Name)
+			logger.V(3).Info("SecurityGroup cache is cleaned up because of http.StatusPreconditionFailed", "securityGroupName", *sg.Name)
 			_ = az.nsgCache.Delete(*sg.Name)
 		}
 		// Invalidate the cache because another new operation has canceled the current request.
 		if strings.Contains(strings.ToLower(respError.Error()), consts.OperationCanceledErrorMessage) {
-			klog.V(3).Infof("SecurityGroup cache for %s is cleanup because CreateOrUpdateSecurityGroup is canceled by another operation", *sg.Name)
+			logger.V(3).Info("SecurityGroup cache is cleaned up because CreateOrUpdateSecurityGroup is canceled by another operation", "securityGroupName", *sg.Name)
 			_ = az.nsgCache.Delete(*sg.Name)
 		}
 	}
diff --git a/pkg/provider/storage/azure_storageaccount.go b/pkg/provider/storage/azure_storageaccount.go
index b45cd563f1..fce83d003d 100644
--- a/pkg/provider/storage/azure_storageaccount.go
+++ b/pkg/provider/storage/azure_storageaccount.go
@@ -38,6 +38,7 @@ import (
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient"
 	"sigs.k8s.io/cloud-provider-azure/pkg/azclient/cache"
 	"sigs.k8s.io/cloud-provider-azure/pkg/consts"
+	"sigs.k8s.io/cloud-provider-azure/pkg/log"
 	azureconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config"
 	"sigs.k8s.io/cloud-provider-azure/pkg/provider/storage/fileservice"
 	"sigs.k8s.io/cloud-provider-azure/pkg/provider/subnet"
@@ -220,6 +221,7 @@ func parseServiceAccountToken(tokenStr string) (string, error) {
 }
 func (az *AccountRepo) getStorageAccountWithCache(ctx context.Context, subsID, resourceGroup, account string) (armstorage.Account, error) {
+	logger := log.Background().WithName("getStorageAccountWithCache")
 	if az.ComputeClientFactory == nil {
 		return armstorage.Account{}, fmt.Errorf("ComputeClientFactory is nil")
 	}
@@ -232,7 +234,7 @@ func (az *AccountRepo) getStorageAccountWithCache(ctx context.Context, subsID, r
 		return armstorage.Account{}, err
 	}
 	if cache != nil {
-		klog.V(2).Infof("Get storage account(%s) from cache", account)
+		logger.V(2).Info("Get storage account from cache", "account", account)
 		return *cache, nil
 	}
@@ -283,6 +285,7 @@ func (az *AccountRepo) GetStorageAccesskeyFromServiceAccountToken(ctx context.Co
 // GetStorageAccesskey gets the storage account access key
 // getLatestAccountKey: get the latest account key per CreationTime if true, otherwise get the first account key
 func (az *AccountRepo) GetStorageAccesskey(ctx context.Context, accountClient accountclient.Interface, account, resourceGroup string, getLatestAccountKey bool) (string, error) {
+	logger := log.Background().WithName("GetStorageAccesskey")
 	result, err := accountClient.ListKeys(ctx, resourceGroup, account)
 	if err != nil {
 		return "", err
@@ -310,12 +313,12 @@ func (az *AccountRepo) GetStorageAccesskey(ctx context.Context, accountClient ac
 				if k.CreationTime != nil {
 					creationTime = *k.CreationTime
 				}
-				klog.V(2).Infof("got storage account key with creation time: %v", creationTime)
+				logger.V(2).Info("got storage account key with creation time", "creationTime", creationTime)
 			} else {
 				if k.CreationTime != nil && creationTime.Before(*k.CreationTime) {
 					key = v
 					creationTime = *k.CreationTime
-					klog.V(2).Infof("got storage account key with latest creation time: %v", creationTime)
+					logger.V(2).Info("got storage account key with latest creation time", "creationTime", creationTime)
 				}
 			}
 		}
@@ -329,6 +332,7 @@ func (az *AccountRepo) GetStorageAccesskey(ctx context.Co
 // EnsureStorageAccount search storage account, create one storage account(with genAccountNamePrefix) if not found, return accountName, accountKey
 func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions *AccountOptions, genAccountNamePrefix string) (string, string, error) {
+	logger := log.Background().WithName("EnsureStorageAccount")
 	if accountOptions ==
nil { return "", "", fmt.Errorf("account options is nil") } @@ -369,7 +373,7 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions } if ptr.Deref(accountOptions.CreatePrivateEndpoint, false) { if accountOptions.StorageType == "" { - klog.V(2).Info("set StorageType as file when not specified") + logger.V(2).Info("set StorageType as file when not specified") accountOptions.StorageType = StorageTypeFile } @@ -408,7 +412,7 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions } if len(accounts) > 0 { - klog.V(4).Infof("found %d matching accounts", len(accounts)) + logger.V(4).Info("found matching accounts", "count", len(accounts)) index := 0 if accountOptions.PickRandomMatchingAccount { // randomly pick one matching account @@ -417,21 +421,21 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions return "", "", err } index = int(n.Int64()) - klog.V(4).Infof("randomly pick one matching account, index: %d, matching accounts: %s", index, accounts) + logger.V(4).Info("randomly pick one matching account", "index", index, "matching accounts", accounts) } accountName = accounts[index].Name createNewAccount = false if accountOptions.SourceAccountName != "" { - klog.V(4).Infof("source account name(%s) is provided, try to find a matching account with source account name", accountOptions.SourceAccountName) + logger.V(4).Info("source account name is provided, try to find a matching account with source account name", "sourceAccountName", accountOptions.SourceAccountName) for _, acct := range accounts { if acct.Name == accountOptions.SourceAccountName { - klog.V(2).Infof("found a matching account %s type %s location %s with source account name", acct.Name, acct.StorageType, acct.Location) + logger.V(2).Info("found a matching account with source account name", "account", acct.Name, "type", acct.StorageType, "location", acct.Location) accountName = acct.Name break } } } - klog.V(4).Infof("found a matching account %s with account index %d", accountName, index) + logger.V(4).Info("found a matching account", "account", accountName, "index", index) } } @@ -443,7 +447,7 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions if accountOptions.CreateAccount { // check whether account exists if _, err := az.GetStorageAccesskey(ctx, storageAccountClient, accountName, resourceGroup, accountOptions.GetLatestAccountKey); err != nil { - klog.V(2).Infof("get storage key for storage account %s returned with %v", accountName, err) + logger.V(2).Error(err, "get storage key for storage account returned with error", "account", accountName) createNewAccount = true } } @@ -499,7 +503,7 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions Action: to.Ptr(string(armstorage.DefaultActionAllow)), } virtualNetworkRules = append(virtualNetworkRules, vnetRule) - klog.V(4).Infof("subnetID(%s) has been set", subnetID) + logger.V(4).Info("subnetID has been set", "subnetID", subnetID) } if len(virtualNetworkRules) > 0 { networkRuleSet = &armstorage.NetworkRuleSet{ @@ -527,13 +531,18 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions var publicNetworkAccess *armstorage.PublicNetworkAccess if accountOptions.PublicNetworkAccess != "" { - klog.V(2).Infof("set PublicNetworkAccess(%s) on account(%s), subscription(%s), resource group(%s)", accountOptions.PublicNetworkAccess, accountName, subsID, resourceGroup) + logger.V(2).Info("set PublicNetworkAccess on account", 
"PublicNetworkAccess", accountOptions.PublicNetworkAccess, "account", accountName, "subscription", subsID, "resourceGroup", resourceGroup) access := armstorage.PublicNetworkAccess(accountOptions.PublicNetworkAccess) publicNetworkAccess = &access } - klog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s, accountKind: %s, tags: %+v", - accountName, resourceGroup, location, accountType, kind, accountOptions.Tags) + logger.V(2).Info("azure - no matching account found, begin to create a new account", + "accountName", accountName, + "resourceGroup", resourceGroup, + "location", location, + "accountType", accountType, + "accountKind", kind, + "tags", accountOptions.Tags) cp := &armstorage.AccountCreateParameters{ SKU: &armstorage.SKU{Name: to.Ptr(armstorage.SKUName(accountType))}, @@ -554,15 +563,15 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions if *accountOptions.EnableLargeFileShare { state = armstorage.LargeFileSharesStateEnabled } - klog.V(2).Infof("enable LargeFileShare(%s) for storage account(%s)", state, accountName) + logger.V(2).Info("enable LargeFileShare for storage account", "LargeFileShare", state, "account", accountName) cp.Properties.LargeFileSharesState = to.Ptr(state) } if accountOptions.AllowBlobPublicAccess != nil { - klog.V(2).Infof("set AllowBlobPublicAccess(%v) for storage account(%s)", *accountOptions.AllowBlobPublicAccess, accountName) + logger.V(2).Info("set AllowBlobPublicAccess for storage account", "AllowBlobPublicAccess", *accountOptions.AllowBlobPublicAccess, "account", accountName) cp.Properties.AllowBlobPublicAccess = accountOptions.AllowBlobPublicAccess } if accountOptions.RequireInfrastructureEncryption != nil { - klog.V(2).Infof("set RequireInfrastructureEncryption(%v) for storage account(%s)", *accountOptions.RequireInfrastructureEncryption, accountName) + logger.V(2).Info("set RequireInfrastructureEncryption for storage account", "RequireInfrastructureEncryption", *accountOptions.RequireInfrastructureEncryption, "account", accountName) cp.Properties.Encryption = &armstorage.Encryption{ RequireInfrastructureEncryption: accountOptions.RequireInfrastructureEncryption, KeySource: to.Ptr(armstorage.KeySourceMicrosoftStorage), @@ -573,11 +582,11 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions } } if accountOptions.AllowSharedKeyAccess != nil { - klog.V(2).Infof("set Allow SharedKeyAccess (%v) for storage account (%s)", *accountOptions.AllowSharedKeyAccess, accountName) + logger.V(2).Info("set Allow SharedKeyAccess for storage account", "allowSharedKeyAccess", *accountOptions.AllowSharedKeyAccess, "account", accountName) cp.Properties.AllowSharedKeyAccess = accountOptions.AllowSharedKeyAccess } if accountOptions.KeyVaultURI != nil { - klog.V(2).Infof("set KeyVault(%v) for storage account(%s)", accountOptions.KeyVaultURI, accountName) + logger.V(2).Info("set KeyVault for storage account", "KeyVault", accountOptions.KeyVaultURI, "account", accountName) cp.Properties.Encryption = &armstorage.Encryption{ KeyVaultProperties: &armstorage.KeyVaultProperties{ KeyName: accountOptions.KeyName, @@ -593,7 +602,7 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions } if accountOptions.IsSmbOAuthEnabled != nil { - klog.V(2).Infof("set IsSmbOAuthEnabled(%v) for storage account(%s)", *accountOptions.IsSmbOAuthEnabled, accountName) + logger.V(2).Info("set IsSmbOAuthEnabled for storage account", 
"IsSmbOAuthEnabled", *accountOptions.IsSmbOAuthEnabled, "account", accountName) if cp.Properties.AzureFilesIdentityBasedAuthentication == nil { cp.Properties.AzureFilesIdentityBasedAuthentication = &armstorage.AzureFilesIdentityBasedAuthentication{ DirectoryServiceOptions: to.Ptr(armstorage.DirectoryServiceOptionsNone), @@ -660,12 +669,15 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions prop.FileServiceProperties.Cors = nil if accountOptions.DisableFileServiceDeleteRetentionPolicy != nil { enable := !*accountOptions.DisableFileServiceDeleteRetentionPolicy - klog.V(2).Infof("set ShareDeleteRetentionPolicy(%v) on account(%s), subscription(%s), resource group(%s)", - enable, accountName, subsID, resourceGroup) + logger.V(2).Info("set ShareDeleteRetentionPolicy on account", + "ShareDeleteRetentionPolicy", enable, + "account", accountName, + "subscription", subsID, + "resourceGroup", resourceGroup) prop.FileServiceProperties.ShareDeleteRetentionPolicy = &armstorage.DeleteRetentionPolicy{Enabled: &enable} } if accountOptions.IsMultichannelEnabled != nil { - klog.V(2).Infof("enable SMB Multichannel setting on account(%s), subscription(%s), resource group(%s)", accountName, subsID, resourceGroup) + logger.V(2).Info("enable SMB Multichannel setting on account", "account", accountName, "subscription", subsID, "resourceGroup", resourceGroup) enabled := *accountOptions.IsMultichannelEnabled prop.FileServiceProperties.ProtocolSettings = &armstorage.ProtocolSettings{Smb: &armstorage.SmbSetting{Multichannel: &armstorage.Multichannel{Enabled: &enabled}}} } @@ -676,7 +688,7 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions } if accountOptions.AccessTier != "" { - klog.V(2).Infof("set AccessTier(%s) on account(%s), subscription(%s), resource group(%s)", accountOptions.AccessTier, accountName, subsID, resourceGroup) + logger.V(2).Info("set AccessTier on account", "AccessTier", accountOptions.AccessTier, "account", accountName, "subscription", subsID, "resourceGroup", resourceGroup) cp.Properties.AccessTier = to.Ptr(armstorage.AccessTier(accountOptions.AccessTier)) } } @@ -722,7 +734,8 @@ func (az *AccountRepo) EnsureStorageAccount(ctx context.Context, accountOptions } func (az *AccountRepo) createPrivateEndpoint(ctx context.Context, accountName string, accountID *string, privateEndpointName, vnetResourceGroup, vnetName, subnetName, location string, storageType Type) error { - klog.V(2).Infof("Creating private endpoint(%s) for account (%s)", privateEndpointName, accountName) + logger := log.Background().WithName("createPrivateEndpoint") + logger.V(2).Info("Creating private endpoint", "privateEndpointName", privateEndpointName, "account", accountName) subnet, err := az.subnetRepo.Get(ctx, vnetResourceGroup, vnetName, subnetName) if err != nil { @@ -735,7 +748,7 @@ func (az *AccountRepo) createPrivateEndpoint(ctx context.Context, accountName st if subnet.Properties.PrivateEndpointNetworkPolicies == nil || *subnet.Properties.PrivateEndpointNetworkPolicies == armnetwork.VirtualNetworkPrivateEndpointNetworkPoliciesEnabled { subnet.Properties.PrivateEndpointNetworkPolicies = to.Ptr(armnetwork.VirtualNetworkPrivateEndpointNetworkPoliciesDisabled) } else { - klog.V(2).Infof("PrivateEndpointNetworkPolicies is already set to %s for subnet (%s, %s)", *subnet.Properties.PrivateEndpointNetworkPolicies, vnetName, subnetName) + logger.V(2).Info("PrivateEndpointNetworkPolicies is already set for subnet", "policies", 
*subnet.Properties.PrivateEndpointNetworkPolicies, "vnetName", vnetName, "subnetName", subnetName) } } @@ -771,7 +784,8 @@ func (az *AccountRepo) createPrivateEndpoint(ctx context.Context, accountName st } func (az *AccountRepo) createPrivateDNSZone(ctx context.Context, vnetResourceGroup, privateDNSZoneName string) error { - klog.V(2).Infof("Creating private dns zone(%s) in resourceGroup (%s)", privateDNSZoneName, vnetResourceGroup) + logger := log.Background().WithName("createPrivateDNSZone") + logger.V(2).Info("Creating private DNS zone", "privateDNSZone", privateDNSZoneName, "resourceGroup", vnetResourceGroup) location := LocationGlobal privateDNSZone := privatedns.PrivateZone{Location: &location} clientFactory := az.NetworkClientFactory @@ -783,7 +797,7 @@ func (az *AccountRepo) createPrivateDNSZone(ctx context.Context, vnetResourceGro if _, err := privatednsclient.CreateOrUpdate(ctx, vnetResourceGroup, privateDNSZoneName, privateDNSZone); err != nil { if strings.Contains(err.Error(), "exists already") { - klog.V(2).Infof("private dns zone(%s) in resourceGroup (%s) already exists", privateDNSZoneName, vnetResourceGroup) + logger.V(2).Info("private DNS zone already exists", "privateDNSZone", privateDNSZoneName, "resourceGroup", vnetResourceGroup) return nil } return err @@ -792,7 +806,8 @@ func (az *AccountRepo) createPrivateDNSZone(ctx context.Context, vnetResourceGro } func (az *AccountRepo) createVNetLink(ctx context.Context, vNetLinkName, vnetResourceGroup, vnetName, privateDNSZoneName string) error { - klog.V(2).Infof("Creating virtual link for vnet(%s) and DNS Zone(%s) in resourceGroup(%s)", vNetLinkName, privateDNSZoneName, vnetResourceGroup) + logger := log.Background().WithName("createVNetLink") + logger.V(2).Info("Creating virtual network link", "vNetLinkName", vNetLinkName, "privateDNSZone", privateDNSZoneName, "resourceGroup", vnetResourceGroup) clientFactory := az.NetworkClientFactory if clientFactory == nil { // multi-tenant support @@ -813,7 +828,8 @@ func (az *AccountRepo) createVNetLink(ctx context.Context, vNetLinkName, vnetRes } func (az *AccountRepo) createPrivateDNSZoneGroup(ctx context.Context, dnsZoneGroupName, privateEndpointName, vnetResourceGroup, vnetName, privateDNSZoneName string) error { - klog.V(2).Infof("Creating private DNS zone group(%s) with privateEndpoint(%s), vNetName(%s), resourceGroup(%s)", dnsZoneGroupName, privateEndpointName, vnetName, vnetResourceGroup) + logger := log.Background().WithName("createPrivateDNSZoneGroup") + logger.V(2).Info("Creating private DNS zone group", "dnsZoneGroup", dnsZoneGroupName, "privateEndpoint", privateEndpointName, "vnetName", vnetName, "resourceGroup", vnetResourceGroup) privateDNSZoneGroup := &armnetwork.PrivateDNSZoneGroup{ Properties: &armnetwork.PrivateDNSZoneGroupPropertiesFormat{ PrivateDNSZoneConfigs: []*armnetwork.PrivateDNSZoneConfig{ @@ -837,6 +853,7 @@ func (az *AccountRepo) createPrivateDNSZoneGroup(ctx context.Context, dnsZoneGro // AddStorageAccountTags add tags to storage account func (az *AccountRepo) AddStorageAccountTags(ctx context.Context, subsID, resourceGroup, account string, tags map[string]*string) error { + logger := log.Background().WithName("AddStorageAccountTags") // add lock to avoid concurrent update on the cache az.lockMap.LockEntry(account) defer az.lockMap.UnlockEntry(account) @@ -859,7 +876,7 @@ func (az *AccountRepo) AddStorageAccountTags(ctx context.Context, subsID, resour // only update when newTags is different from old tags _ = 
az.storageAccountCache.Delete(account) // clean cache updateParams := &armstorage.AccountUpdateParameters{Tags: newTags} - klog.V(2).Infof("add storage account(%s) with tags(%+v)", account, newTags) + logger.V(2).Info("add tags to storage account", "account", account, "tags", newTags) accountClient, err := az.ComputeClientFactory.GetAccountClientForSub(subsID) if err != nil { return err } @@ -872,6 +889,7 @@ func (az *AccountRepo) AddStorageAccountTags(ctx context.Context, subsID, resour // RemoveStorageAccountTag remove tag from storage account func (az *AccountRepo) RemoveStorageAccountTag(ctx context.Context, subsID, resourceGroup, account, key string) error { + logger := log.Background().WithName("RemoveStorageAccountTag") // add lock to avoid concurrent update on the cache az.lockMap.LockEntry(account) defer az.lockMap.UnlockEntry(account) @@ -891,7 +909,7 @@ func (az *AccountRepo) RemoveStorageAccountTag(ctx context.Context, subsID, reso // only update when newTags is different from old tags _ = az.storageAccountCache.Delete(account) // clean cache updateParams := &armstorage.AccountUpdateParameters{Tags: result.Tags} - klog.V(2).Infof("remove tag(%s) from storage account(%s)", key, account) + logger.V(2).Info("remove tag from storage account", "tag", key, "account", account) accountClient, err := az.ComputeClientFactory.GetAccountClientForSub(subsID) if err != nil { return err } @@ -924,6 +942,7 @@ func isLocationEqual(account *armstorage.Account, accountOptions *AccountOptions } func AreVNetRulesEqual(account *armstorage.Account, accountOptions *AccountOptions) bool { + logger := log.Background().WithName("AreVNetRulesEqual") if len(accountOptions.VirtualNetworkResourceIDs) > 0 { if account.Properties == nil || account.Properties.NetworkRuleSet == nil || account.Properties.NetworkRuleSet.VirtualNetworkRules == nil { @@ -942,7 +961,7 @@ func AreVNetRulesEqual(account *armstorage.Account, accountOptio return false } } - klog.V(2).Infof("found all vnet rules(%v) in account %s", accountOptions.VirtualNetworkResourceIDs, ptr.Deref(account.Name, "")) + logger.V(2).Info("found all vnet rules in account", "rules", accountOptions.VirtualNetworkResourceIDs, "account", ptr.Deref(account.Name, "")) } return true } @@ -958,10 +977,11 @@ func isLargeFileSharesPropertyEqual(account *armstorage.Account, accountOptions } func isTaggedWithSkip(account *armstorage.Account) bool { + logger := log.Background().WithName("isTaggedWithSkip") if account.Tags != nil { // skip account with SkipMatchingTag tag if _, ok := account.Tags[SkipMatchingTag]; ok { - klog.V(2).Infof("found %s tag for account %s, skip matching", SkipMatchingTag, ptr.Deref(account.Name, "")) + logger.V(2).Info("found tag for account, skip matching", "tag", SkipMatchingTag, "account", ptr.Deref(account.Name, "")) return false } } diff --git a/pkg/provider/storage/fileservice/fileservice_repo.go b/pkg/provider/storage/fileservice/fileservice_repo.go index 58ce17bc56..2a6b04cb1a 100644 --- a/pkg/provider/storage/fileservice/fileservice_repo.go +++ b/pkg/provider/storage/fileservice/fileservice_repo.go @@ -23,10 +23,10 @@ import ( "time" armstorage "github.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage/v2" - "k8s.io/klog/v2" "sigs.k8s.io/cloud-provider-azure/pkg/azclient" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/cache" + "sigs.k8s.io/cloud-provider-azure/pkg/log" azureconfig "sigs.k8s.io/cloud-provider-azure/pkg/provider/config" ) @@ -52,6 +52,7 @@ func NewRepository(config azureconfig.Config, clientFactory azclient.ClientFacto }, nil } func (az 
*fileServicePropertiesRepo) Get(ctx context.Context, subsID, resourceGroup, account string) (*armstorage.FileServiceProperties, error) { + logger := log.Background().WithName("Get") if az.clientFactory == nil { return nil, fmt.Errorf("clientFactory is nil") } @@ -65,7 +66,7 @@ func (az *fileServicePropertiesRepo) Get(ctx context.Context, subsID, resourceGr return nil, err } if cache != nil { - klog.V(2).Infof("Get service properties(%s) from cache", account) + logger.V(2).Info("Get service properties from cache", "account", account) return cache, nil } diff --git a/pkg/provider/storage/storage_account.go b/pkg/provider/storage/storage_account.go index 1a8d3dc231..adbfabdbe2 100644 --- a/pkg/provider/storage/storage_account.go +++ b/pkg/provider/storage/storage_account.go @@ -22,14 +22,14 @@ import ( "strings" "time" - "k8s.io/klog/v2" - "sigs.k8s.io/cloud-provider-azure/pkg/azclient/accountclient" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) // GetStorageAccesskey gets the storage account access key // getLatestAccountKey: get the latest account key per CreationTime if true, otherwise get the first account key func GetStorageAccesskey(ctx context.Context, saClient accountclient.Interface, account, resourceGroup string, getLatestAccountKey bool) (string, error) { + logger := log.Background().WithName("GetStorageAccesskey") if saClient == nil { return "", fmt.Errorf("StorageAccountClient is nil") } @@ -61,12 +61,12 @@ func GetStorageAccesskey(ctx context.Context, saClient accountclient.Interface, if k.CreationTime != nil { creationTime = *k.CreationTime } - klog.V(2).Infof("got storage account key with creation time: %v", creationTime) + logger.V(2).Info("got storage account key with creation time", "creationTime", creationTime) } else { if k.CreationTime != nil && creationTime.Before(*k.CreationTime) { key = v creationTime = *k.CreationTime - klog.V(2).Infof("got storage account key with latest creation time: %v", creationTime) + logger.V(2).Info("got storage account key with latest creation time", "creationTime", creationTime) } } } diff --git a/pkg/provider/subnet/subnet.go b/pkg/provider/subnet/subnet.go index 940e2342b9..015a8661d9 100644 --- a/pkg/provider/subnet/subnet.go +++ b/pkg/provider/subnet/subnet.go @@ -23,6 +23,7 @@ import ( "k8s.io/klog/v2" "sigs.k8s.io/cloud-provider-azure/pkg/azclient/subnetclient" + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) type Repository interface { @@ -42,8 +43,9 @@ func NewRepo(subnetsClient subnetclient.Interface) (Repository, error) { // CreateOrUpdateSubnet invokes az.SubnetClient.CreateOrUpdate with exponential backoff retry func (az *repo) CreateOrUpdate(ctx context.Context, rg string, vnetName string, subnetName string, subnet armnetwork.Subnet) error { + logger := log.Background().WithName("SubnetsClient.CreateOrUpdate") _, rerr := az.SubnetsClient.CreateOrUpdate(ctx, rg, vnetName, subnetName, subnet) - klog.V(10).Infof("SubnetsClient.CreateOrUpdate(%s): end", subnetName) + logger.V(10).Info("end", "subnetName", subnetName) if rerr != nil { klog.Errorf("SubnetClient.CreateOrUpdate(%s) failed: %s", subnetName, rerr.Error()) return rerr diff --git a/pkg/util/controller/node/controller_utils.go b/pkg/util/controller/node/controller_utils.go index e1a6e72a3d..d47d5128a5 100644 --- a/pkg/util/controller/node/controller_utils.go +++ b/pkg/util/controller/node/controller_utils.go @@ -24,6 +24,8 @@ import ( "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" + + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) // 
CreateAddNodeHandler creates an add node handler. @@ -89,6 +91,7 @@ func GetNodeCondition(status *v1.NodeStatus, conditionType v1.NodeConditionType) // RecordNodeStatusChange records a event related to a node status change. (Common to lifecycle and ipam) func RecordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, newStatus string) { + logger := log.Background().WithName("RecordNodeStatusChange") ref := &v1.ObjectReference{ APIVersion: "v1", Kind: "Node", @@ -96,7 +99,7 @@ func RecordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, newSta UID: node.UID, Namespace: "", } - klog.V(2).Infof("Recording status change %s event message for node %s", newStatus, node.Name) + logger.V(2).Info("Recording status change event message for node", "status", newStatus, "node", node.Name) // TODO: This requires a transaction, either both node status is updated // and event is recorded or neither should happen, see issue #6055. recorder.Eventf(ref, v1.EventTypeNormal, newStatus, "Node %s status is now: %s", node.Name, newStatus) diff --git a/pkg/util/node/node.go b/pkg/util/node/node.go index 714f1eb120..950f4e251d 100644 --- a/pkg/util/node/node.go +++ b/pkg/util/node/node.go @@ -26,7 +26,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" clientset "k8s.io/client-go/kubernetes" - "k8s.io/klog/v2" + + "sigs.k8s.io/cloud-provider-azure/pkg/log" ) type nodeForCIDRMergePatch struct { @@ -66,6 +67,7 @@ func PatchNodeCIDR(c clientset.Interface, node types.NodeName, cidr string) erro // PatchNodeCIDRs patches the specified node.CIDR=cidrs[0] and node.CIDRs to the given value. func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) error { + logger := log.Background().WithName("PatchNodeCIDRs") // set the pod cidrs list and set the old pod cidr field patch := nodeForCIDRMergePatch{ Spec: nodeSpecForMergePatch{ @@ -78,7 +80,7 @@ func PatchNodeCIDRs(c clientset.Interface, node types.NodeName, cidrs []string) if err != nil { return fmt.Errorf("failed to json.Marshal CIDR: %w", err) } - klog.V(4).Infof("cidrs patch bytes for node %s are:%s", string(node), string(patchBytes)) + logger.V(4).Info("cidrs patch bytes for node", "node", string(node), "cidrsPatchBytes", string(patchBytes)) if _, err := c.CoreV1().Nodes().Patch(context.TODO(), string(node), types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil { return fmt.Errorf("failed to patch node CIDR: %w", err) }