diff --git a/go.mod b/go.mod
index 4a3ded2ae..7298602ae 100644
--- a/go.mod
+++ b/go.mod
@@ -23,13 +23,13 @@ require (
 	golang.org/x/net v0.27.0
 	google.golang.org/grpc v1.65.0
 	google.golang.org/protobuf v1.34.2
-	k8s.io/api v0.28.3
-	k8s.io/apimachinery v0.28.3
-	k8s.io/client-go v0.28.3
-	k8s.io/component-base v0.28.3
+	k8s.io/api v0.28.12
+	k8s.io/apimachinery v0.28.12
+	k8s.io/client-go v0.28.12
+	k8s.io/component-base v0.28.12
 	k8s.io/klog/v2 v2.100.1
-	k8s.io/kubernetes v1.28.4
-	k8s.io/mount-utils v0.28.3
+	k8s.io/kubernetes v1.28.12
+	k8s.io/mount-utils v0.28.12
 	k8s.io/utils v0.0.0-20230505201702-9f6742963106
 	sigs.k8s.io/cloud-provider-azure v1.27.1-0.20230907063607-e9994a5f9c7a
 	sigs.k8s.io/yaml v1.3.0
@@ -51,8 +51,8 @@ require (
 	github.com/onsi/ginkgo/v2 v2.13.0
 	github.com/pkg/errors v0.9.1
 	github.com/satori/go.uuid v1.2.0
-	k8s.io/apiserver v0.28.3
-	k8s.io/pod-security-admission v0.28.3
+	k8s.io/apiserver v0.28.12
+	k8s.io/pod-security-admission v0.28.12
 )
 
 require (
@@ -148,13 +148,13 @@ require (
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 	k8s.io/apiextensions-apiserver v0.0.0 // indirect
-	k8s.io/cloud-provider v0.28.3 // indirect
-	k8s.io/component-helpers v0.28.3 // indirect
-	k8s.io/controller-manager v0.28.3 // indirect
-	k8s.io/kms v0.28.3 // indirect
+	k8s.io/cloud-provider v0.28.12 // indirect
+	k8s.io/component-helpers v0.28.12 // indirect
+	k8s.io/controller-manager v0.28.12 // indirect
+	k8s.io/kms v0.28.12 // indirect
 	k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
 	k8s.io/kubectl v0.0.0 // indirect
-	k8s.io/kubelet v0.28.3 // indirect
+	k8s.io/kubelet v0.28.12 // indirect
 	sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 // indirect
 	sigs.k8s.io/cloud-provider-azure/pkg/azclient v0.0.0-20231101022055-5e1cc4addf97
 	sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
@@ -167,33 +167,33 @@ require (
 )
 
 replace (
-	k8s.io/api => k8s.io/api v0.28.3
-	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.3
-	k8s.io/apimachinery => k8s.io/apimachinery v0.28.3
-	k8s.io/apiserver => k8s.io/apiserver v0.28.3
-	k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.3
-	k8s.io/client-go => k8s.io/client-go v0.28.3
-	k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.3
-	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.3
-	k8s.io/code-generator => k8s.io/code-generator v0.28.3
-	k8s.io/component-base => k8s.io/component-base v0.28.3
-	k8s.io/component-helpers => k8s.io/component-helpers v0.28.3
-	k8s.io/controller-manager => k8s.io/controller-manager v0.28.3
-	k8s.io/cri-api => k8s.io/cri-api v0.28.3
-	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.3
-	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.3
-	k8s.io/endpointslice => k8s.io/endpointslice v0.28.3
-	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.3
-	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.3
-	k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.3
-	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.3
-	k8s.io/kubectl => k8s.io/kubectl v0.28.3
-	k8s.io/kubelet => k8s.io/kubelet v0.28.3
-	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.3
-	k8s.io/metrics => k8s.io/metrics v0.28.3
-	k8s.io/mount-utils => k8s.io/mount-utils v0.28.3
-	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.3
-	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.3
-	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.28.3
-	k8s.io/sample-controller => k8s.io/sample-controller v0.28.3
+	k8s.io/api => k8s.io/api v0.28.12
+	k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.12
+	k8s.io/apimachinery => k8s.io/apimachinery v0.28.12
+	k8s.io/apiserver => k8s.io/apiserver v0.28.12
+	k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.12
+	k8s.io/client-go => k8s.io/client-go v0.28.12
+	k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.12
+	k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.12
+	k8s.io/code-generator => k8s.io/code-generator v0.28.12
+	k8s.io/component-base => k8s.io/component-base v0.28.12
+	k8s.io/component-helpers => k8s.io/component-helpers v0.28.12
+	k8s.io/controller-manager => k8s.io/controller-manager v0.28.12
+	k8s.io/cri-api => k8s.io/cri-api v0.28.12
+	k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.12
+	k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.12
+	k8s.io/endpointslice => k8s.io/endpointslice v0.28.12
+	k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.12
+	k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.12
+	k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.12
+	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.12
+	k8s.io/kubectl => k8s.io/kubectl v0.28.12
+	k8s.io/kubelet => k8s.io/kubelet v0.28.12
+	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.12
+	k8s.io/metrics => k8s.io/metrics v0.28.12
+	k8s.io/mount-utils => k8s.io/mount-utils v0.28.12
+	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.12
+	k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.12
+	k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.28.12
+	k8s.io/sample-controller => k8s.io/sample-controller v0.28.12
 )
diff --git a/go.sum b/go.sum
index 8021bf5fc..855845a4f 100644
--- a/go.sum
+++ b/go.sum
@@ -421,42 +421,42 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-k8s.io/api v0.28.3 h1:Gj1HtbSdB4P08C8rs9AR94MfSGpRhJgsS+GF9V26xMM=
-k8s.io/api v0.28.3/go.mod h1:MRCV/jr1dW87/qJnZ57U5Pak65LGmQVkKTzf3AtKFHc=
-k8s.io/apiextensions-apiserver v0.28.3 h1:Od7DEnhXHnHPZG+W9I97/fSQkVpVPQx2diy+2EtmY08=
-k8s.io/apiextensions-apiserver v0.28.3/go.mod h1:NE1XJZ4On0hS11aWWJUTNkmVB03j9LM7gJSisbRt8Lc=
-k8s.io/apimachinery v0.28.3 h1:B1wYx8txOaCQG0HmYF6nbpU8dg6HvA06x5tEffvOe7A=
-k8s.io/apimachinery v0.28.3/go.mod h1:uQTKmIqs+rAYaq+DFaoD2X7pcjLOqbQX2AOiO0nIpb8=
-k8s.io/apiserver v0.28.3 h1:8Ov47O1cMyeDzTXz0rwcfIIGAP/dP7L8rWbEljRcg5w=
-k8s.io/apiserver v0.28.3/go.mod h1:YIpM+9wngNAv8Ctt0rHG4vQuX/I5rvkEMtZtsxW2rNM=
-k8s.io/client-go v0.28.3 h1:2OqNb72ZuTZPKCl+4gTKvqao0AMOl9f3o2ijbAj3LI4=
-k8s.io/client-go v0.28.3/go.mod h1:LTykbBp9gsA7SwqirlCXBWtK0guzfhpoW4qSm7i9dxo=
-k8s.io/cloud-provider v0.28.3 h1:9u+JjA3zIn0nqLOOa8tWnprFkffguSAhfBvo8p7LhBQ=
-k8s.io/cloud-provider v0.28.3/go.mod h1:shAJxdrKu+SwwGUhkodxByPjaH8KBFZqXo6jU1F0ehI=
-k8s.io/component-base v0.28.3 h1:rDy68eHKxq/80RiMb2Ld/tbH8uAE75JdCqJyi6lXMzI=
-k8s.io/component-base v0.28.3/go.mod h1:fDJ6vpVNSk6cRo5wmDa6eKIG7UlIQkaFmZN2fYgIUD8=
-k8s.io/component-helpers v0.28.3 h1:te9ieTGzcztVktUs92X53P6BamAoP73MK0qQP0WmDqc=
-k8s.io/component-helpers v0.28.3/go.mod h1:oJR7I9ist5UAQ3y/CTdbw6CXxdMZ1Lw2Ua/EZEwnVLs=
-k8s.io/controller-manager v0.28.3 h1:2s0wBvrGuRwMYEnl5Ed+qkK1kAfZR6H+0Ut1R2tHLRg=
-k8s.io/controller-manager v0.28.3/go.mod h1:lYu5hxBVmfK5NrpmeVrioPH4ROnE4OxmUM3xx6JWlLs=
-k8s.io/csi-translation-lib v0.28.3 h1:7deV+HZjV418AGikSDPW8dyzTpm4K3tNbQUp3KmR7cs=
-k8s.io/csi-translation-lib v0.28.3/go.mod h1:zlrYwakCz2yji9/8EaJk+afIKPrYXPNXXLDO8DVuuTk=
+k8s.io/api v0.28.12 h1:C2hpsaso18pqn0Dmkfnbv/YCctozTC3KGGuZ6bF7zhQ=
+k8s.io/api v0.28.12/go.mod h1:qjswI+whxvf9LAKD4sEYHfy+WgHGWeH+H5sCRQMwZAQ=
+k8s.io/apiextensions-apiserver v0.28.12 h1:6GA64rylk5q0mbXfHHFVgfL1jx/4p6RU+Y+ni2DUuZc=
+k8s.io/apiextensions-apiserver v0.28.12/go.mod h1:Len29ySvb/fnrXvioTxg2l6iFi97B53Bm3/jBMBllCE=
+k8s.io/apimachinery v0.28.12 h1:VepMEVOi9o7L/4wMAXJq+3BK9tqBIeerTB+HSOTKeo0=
+k8s.io/apimachinery v0.28.12/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o=
+k8s.io/apiserver v0.28.12 h1:fvZItMw20ySP/QAU5//Ov1pJFyvrr8abeUsh3ZyF8FI=
+k8s.io/apiserver v0.28.12/go.mod h1:46S3UWu620UhP5skPJ+WQWC3iWCrl1AiYJPyHxVueE4=
+k8s.io/client-go v0.28.12 h1:li7iRPRQF3vDki6gTxT/kXWJvw3BkJSdjVPVhDTZQec=
+k8s.io/client-go v0.28.12/go.mod h1:yEzH2Z+nEGlrnKyHJWcJsbOr5tGdIj04dj1TVQOg0wE=
+k8s.io/cloud-provider v0.28.12 h1:AJd4BgDjagX6WSm5fMRA/V0rH9rteIkx7j6Jg2z9yNQ=
+k8s.io/cloud-provider v0.28.12/go.mod h1:SFM1GGNoLGXROMWyuU+ovUzqVUmUk0Y8Y7O4yYnhf/M=
+k8s.io/component-base v0.28.12 h1:ZNq6QFFGCPjaAzWqYHaQRoAY5seoK3vP0pZOjgxOzNc=
+k8s.io/component-base v0.28.12/go.mod h1:8zI5TmGuHX6R5Lay61Ox7wb+dsEENl0NBmVSiHMQu1c=
+k8s.io/component-helpers v0.28.12 h1:tHF4FcM/CxviA684futgMXhQeC2NOFPvHVKseixc7Cs=
+k8s.io/component-helpers v0.28.12/go.mod h1:VbQ5E9qnr8alyAS3b3pqXKvkEOJKoj6z6PA8S+6Wlws=
+k8s.io/controller-manager v0.28.12 h1:A/A14ErMuTuBW8myUCSfDr2QG5qS90ZV2DohyueAN8A=
+k8s.io/controller-manager v0.28.12/go.mod h1:SEIMkdUzB4saf4sdTU2wzST6PU9zHGsTDwhoM/pVoko=
+k8s.io/csi-translation-lib v0.28.12 h1:lrWqfa3AiOg3EIw/q0xPWg1ZqQyDfD1rGe5J4QFx+hA=
+k8s.io/csi-translation-lib v0.28.12/go.mod h1:SXEFryzUH27XNbiI46Qz5IhfG68Pyiah8/zGrnKNrn8=
 k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
 k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
-k8s.io/kms v0.28.3 h1:jYwwAe96XELNjYWv1G4kNzizcFoZ50OOElvPansbw70=
-k8s.io/kms v0.28.3/go.mod h1:kSMjU2tg7vjqqoWVVCcmPmNZ/CofPsoTbSxAipCvZuE=
+k8s.io/kms v0.28.12 h1:YEcJWelR7ChLI7le/slHpeIkx7v6MoPkITo3JyL8s1M=
+k8s.io/kms v0.28.12/go.mod h1:EZtSJo9PoqEe/aB/X5sXPRl5LHukSuXlDrDnY76lJjY=
 k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
 k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
-k8s.io/kubectl v0.28.3 h1:H1Peu1O3EbN9zHkJCcvhiJ4NUj6lb88sGPO5wrWIM6k=
-k8s.io/kubectl v0.28.3/go.mod h1:RDAudrth/2wQ3Sg46fbKKl4/g+XImzvbsSRZdP2RiyE=
-k8s.io/kubelet v0.28.3 h1:bp/uIf1R5F61BlFvFtzc4PDEiK7TtFcw3wFJlc0V0LM=
-k8s.io/kubelet v0.28.3/go.mod h1:E3NHYbp/v45Ao6AD0EOZnqO3L0R6Haks6Nm0+bnFwtU=
-k8s.io/kubernetes v1.28.4 h1:aRNxs5jb8FVTtlnxeA4FSDBVKuFwA8Gw40/U2zReBYA=
-k8s.io/kubernetes v1.28.4/go.mod h1:BTzDCKYAlu6LL9ITbfjwgwIrJ30hlTgbv0eXDoA/WoA=
-k8s.io/mount-utils v0.28.3 h1:1p6Dk2QhoK0IYOee2MOec/90a7fC0yUqlWPfQy/4JFE=
-k8s.io/mount-utils v0.28.3/go.mod h1:ceMAZ+Nzlk8zOwN205YXXGJRGmf1o0/XIwsKnG44p0I=
-k8s.io/pod-security-admission v0.28.3 h1:CtVVG36YwniCH4d18wAoFW6n0Qm5Z1uUVfDIiO4kY0I=
-k8s.io/pod-security-admission v0.28.3/go.mod h1:qm+gZ8FdnxBgVVTZfSjlK/oeBosmvECBdl92RWuWxhI=
+k8s.io/kubectl v0.28.12 h1:CyGVOUO83jYxwLI5XtBFNoerAQj47fnEDrCPKWxlAi8=
+k8s.io/kubectl v0.28.12/go.mod
h1:KzG7ROxXnUqfS6S+xJIIbd6WMpFYNByyxqNkIhxS6Qs= +k8s.io/kubelet v0.28.12 h1:ACRS1b6XxIxAJoOJ95bsy0qm0DoxD6h/Dwi4U6Pot74= +k8s.io/kubelet v0.28.12/go.mod h1:DYlF/KqAA4WoiBElCjeDKGv2K37FLTUmTWyxMDv9s8A= +k8s.io/kubernetes v1.28.12 h1:DtWB8ZjoYiN/PXD4qDXFppf9IouVUavn6r3S+3NMUkU= +k8s.io/kubernetes v1.28.12/go.mod h1:chlmcCDBnOA/y+572cw8dO0Rci1wiA8bm5+zhPdFLCk= +k8s.io/mount-utils v0.28.12 h1:AUHMf700dF+Oes67uVTPU60Z4mtqtyIRNEJl08fWiQU= +k8s.io/mount-utils v0.28.12/go.mod h1:ZxAFXgKzcAyi3VTd2pKFlZFswl9Q/cveJ5aptdjQOuc= +k8s.io/pod-security-admission v0.28.12 h1:v6kTu1bYj95uAUyuAbkGbRpw4iN+kRL3jTxIXJzofrk= +k8s.io/pod-security-admission v0.28.12/go.mod h1:CZq1eSToAggIIojQ7GEa77TEJMH5g7B5R22rI9KlfAU= k8s.io/utils v0.0.0-20230505201702-9f6742963106 h1:EObNQ3TW2D+WptiYXlApGNLVy0zm/JIBVY9i+M4wpAU= k8s.io/utils v0.0.0-20230505201702-9f6742963106/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.2 h1:trsWhjU5jZrx6UvFu4WzQDrN7Pga4a7Qg+zcfcj64PA= diff --git a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go index 38b80a304..3da554944 100644 --- a/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go +++ b/vendor/k8s.io/apiserver/pkg/admission/plugin/cel/composition.go @@ -177,7 +177,7 @@ func (a *variableAccessor) Callback(_ *lazy.MapValue) ref.Val { return types.NewErr("composited variable %q fails to compile: %v", a.name, a.result.Error) } - v, details, err := a.result.Program.Eval(a.activation) + v, details, err := a.result.Program.ContextEval(a.context, a.activation) if details == nil { return types.NewErr("unable to get evaluation details of variable %q", a.name) } diff --git a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go index 61a7fd70d..254a22348 100644 --- a/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go +++ b/vendor/k8s.io/apiserver/pkg/endpoints/discovery/aggregated/handler.go @@ -229,7 +229,6 @@ func (rdm *resourceDiscoveryManager) AddGroupVersion(source Source, groupName st } func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupName string, value apidiscoveryv2beta1.APIVersionDiscovery) { - klog.Infof("Adding GroupVersion %s %s to ResourceManager", groupName, value.Version) if rdm.apiGroups == nil { rdm.apiGroups = make(map[groupKey]*apidiscoveryv2beta1.APIGroupDiscovery) @@ -273,6 +272,7 @@ func (rdm *resourceDiscoveryManager) addGroupVersionLocked(source Source, groupN } rdm.apiGroups[key] = group } + klog.Infof("Adding GroupVersion %s %s to ResourceManager", groupName, value.Version) gv := metav1.GroupVersion{Group: groupName, Version: value.Version} gvKey := groupVersionKey{ diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go index 2276e2daf..d06447a7e 100644 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiserver/pkg/features/kube_features.go @@ -166,6 +166,13 @@ const ( // Deprecates and removes SelfLink from ObjectMeta and ListMeta. RemoveSelfLink featuregate.Feature = "RemoveSelfLink" + // owner: @serathius + // beta: v1.30 + // + // Allow watch cache to create a watch on a dedicated RPC. + // This prevents watch cache from being starved by other watches. 
+ SeparateCacheWatchRPC featuregate.Feature = "SeparateCacheWatchRPC" + // owner: @apelisse, @lavalamp // alpha: v1.14 // beta: v1.16 @@ -222,6 +229,12 @@ const ( // Enables support for watch bookmark events. WatchBookmark featuregate.Feature = "WatchBookmark" + // owner: @serathius + // beta: 1.30 + // Enables watches without resourceVersion to be served from storage. + // Used to prevent https://github.com/kubernetes/kubernetes/issues/123072 until etcd fixes the issue. + WatchFromStorageWithoutResourceVersion featuregate.Feature = "WatchFromStorageWithoutResourceVersion" + // owner: @vinaykul // kep: http://kep.k8s.io/1287 // alpha: v1.27 @@ -286,6 +299,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS RemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, + ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 @@ -298,6 +313,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, + WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, + InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha}, WatchList: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/vendor/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/apiserver/pkg/server/config.go index d678f52df..047736e57 100644 --- a/vendor/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/apiserver/pkg/server/config.go @@ -915,7 +915,7 @@ func DefaultBuildHandlerChain(apiHandler http.Handler, c *Config) http.Handler { requestWorkEstimator := flowcontrolrequest.NewWorkEstimator( c.StorageObjectCountTracker.Get, c.FlowControl.GetInterestedWatchCount, workEstimatorCfg, c.FlowControl.GetMaxSeats) handler = filterlatency.TrackCompleted(handler) - handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator) + handler = genericfilters.WithPriorityAndFairness(handler, c.LongRunningFunc, c.FlowControl, requestWorkEstimator, c.RequestTimeout/4) handler = filterlatency.TrackStarted(handler, c.TracerProvider, "priorityandfairness") } else { handler = genericfilters.WithMaxInFlightLimit(handler, c.MaxRequestsInFlight, c.MaxMutatingRequestsInFlight, c.LongRunningFunc) diff --git a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go index 6b3987781..05cc44263 100644 --- a/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go +++ b/vendor/k8s.io/apiserver/pkg/server/filters/priority-and-fairness.go @@ -35,6 +35,7 @@ import ( fcmetrics "k8s.io/apiserver/pkg/util/flowcontrol/metrics" flowcontrolrequest "k8s.io/apiserver/pkg/util/flowcontrol/request" "k8s.io/klog/v2" + utilsclock "k8s.io/utils/clock" ) // PriorityAndFairnessClassification identifies the results of @@ -78,6 +79,10 @@ type priorityAndFairnessHandler struct { // the purpose of computing RetryAfter header to avoid system // overload. droppedRequests utilflowcontrol.DroppedRequestsTracker + + // newReqWaitCtxFn creates a derived context with a deadline + // of how long a given request can wait in its queue. 
+ newReqWaitCtxFn func(context.Context) (context.Context, context.CancelFunc) } func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Request) { @@ -240,8 +245,9 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque resultCh <- err }() - // We create handleCtx with explicit cancelation function. - // The reason for it is that Handle() underneath may start additional goroutine + // We create handleCtx with an adjusted deadline, for two reasons. + // One is to limit the time the request waits before its execution starts. + // The other reason for it is that Handle() underneath may start additional goroutine // that is blocked on context cancellation. However, from APF point of view, // we don't want to wait until the whole watch request is processed (which is // when it context is actually cancelled) - we want to unblock the goroutine as @@ -249,7 +255,7 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque // // Note that we explicitly do NOT call the actuall handler using that context // to avoid cancelling request too early. - handleCtx, handleCtxCancel := context.WithCancel(ctx) + handleCtx, handleCtxCancel := h.newReqWaitCtxFn(ctx) defer handleCtxCancel() // Note that Handle will return irrespective of whether the request @@ -286,7 +292,11 @@ func (h *priorityAndFairnessHandler) Handle(w http.ResponseWriter, r *http.Reque h.handler.ServeHTTP(w, r) } - h.fcIfc.Handle(ctx, digest, noteFn, estimateWork, queueNote, execute) + func() { + handleCtx, cancelFn := h.newReqWaitCtxFn(ctx) + defer cancelFn() + h.fcIfc.Handle(handleCtx, digest, noteFn, estimateWork, queueNote, execute) + }() } if !served { @@ -309,6 +319,7 @@ func WithPriorityAndFairness( longRunningRequestCheck apirequest.LongRunningRequestCheck, fcIfc utilflowcontrol.Interface, workEstimator flowcontrolrequest.WorkEstimatorFunc, + defaultRequestWaitLimit time.Duration, ) http.Handler { if fcIfc == nil { klog.Warningf("priority and fairness support not found, skipping") @@ -322,12 +333,18 @@ func WithPriorityAndFairness( waitingMark.mutatingObserver = fcmetrics.GetWaitingMutatingConcurrency() }) + clock := &utilsclock.RealClock{} + newReqWaitCtxFn := func(ctx context.Context) (context.Context, context.CancelFunc) { + return getRequestWaitContext(ctx, defaultRequestWaitLimit, clock) + } + priorityAndFairnessHandler := &priorityAndFairnessHandler{ handler: handler, longRunningRequestCheck: longRunningRequestCheck, fcIfc: fcIfc, workEstimator: workEstimator, droppedRequests: utilflowcontrol.NewDroppedRequestsTracker(), + newReqWaitCtxFn: newReqWaitCtxFn, } return http.HandlerFunc(priorityAndFairnessHandler.Handle) } @@ -356,3 +373,48 @@ func tooManyRequests(req *http.Request, w http.ResponseWriter, retryAfter string w.Header().Set("Retry-After", retryAfter) http.Error(w, "Too many requests, please try again later.", http.StatusTooManyRequests) } + +// getRequestWaitContext returns a new context with a deadline of how +// long the request is allowed to wait before it is removed from its +// queue and rejected. +// The context.CancelFunc returned must never be nil and the caller is +// responsible for calling the CancelFunc function for cleanup. +// - ctx: the context associated with the request (it may or may +// not have a deadline). +// - defaultRequestWaitLimit: the default wait duration that is used +// if the request context does not have any deadline. 
+// (a) initialization of a watch or +// (b) a request whose context has no deadline +// +// clock comes in handy for testing the function +func getRequestWaitContext(ctx context.Context, defaultRequestWaitLimit time.Duration, clock utilsclock.PassiveClock) (context.Context, context.CancelFunc) { + if ctx.Err() != nil { + return ctx, func() {} + } + + reqArrivedAt := clock.Now() + if reqReceivedTimestamp, ok := apirequest.ReceivedTimestampFrom(ctx); ok { + reqArrivedAt = reqReceivedTimestamp + } + + // a) we will allow the request to wait in the queue for one + // fourth of the time of its allotted deadline. + // b) if the request context does not have any deadline + // then we default to 'defaultRequestWaitLimit' + // in any case, the wait limit for any request must not + // exceed the hard limit of 1m + // + // request has deadline: + // wait-limit = min(remaining deadline / 4, 1m) + // request has no deadline: + // wait-limit = min(defaultRequestWaitLimit, 1m) + thisReqWaitLimit := defaultRequestWaitLimit + if deadline, ok := ctx.Deadline(); ok { + thisReqWaitLimit = deadline.Sub(reqArrivedAt) / 4 + } + if thisReqWaitLimit > time.Minute { + thisReqWaitLimit = time.Minute + } + + return context.WithDeadline(ctx, reqArrivedAt.Add(thisReqWaitLimit)) +} diff --git a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go index 69f8fb515..5d031e202 100644 --- a/vendor/k8s.io/apiserver/pkg/server/options/recommended.go +++ b/vendor/k8s.io/apiserver/pkg/server/options/recommended.go @@ -154,7 +154,6 @@ func (o *RecommendedOptions) ApplyTo(config *server.RecommendedConfig) error { config.SharedInformerFactory, kubernetes.NewForConfigOrDie(config.ClientConfig).FlowcontrolV1beta3(), config.MaxRequestsInFlight+config.MaxMutatingRequestsInFlight, - config.RequestTimeout/4, ) } else { klog.Warningf("Neither kubeconfig is provided nor service-account is mounted, so APIPriorityAndFairness will be disabled") diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 0796f591d..e34fb4986 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -26,6 +26,7 @@ import ( "time" "go.opentelemetry.io/otel/attribute" + "google.golang.org/grpc/metadata" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" @@ -401,10 +402,18 @@ func NewCacherFromConfig(config Config) (*Cacher, error) { // so that future reuse does not get a spurious timeout. <-cacher.timer.C } - progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock) + var contextMetadata metadata.MD + if utilfeature.DefaultFeatureGate.Enabled(features.SeparateCacheWatchRPC) { + // Add grpc context metadata to watch and progress notify requests done by cacher to: + // * Prevent starvation of watch opened by cacher, by moving it to separate Watch RPC than watch request that bypass cacher. + // * Ensure that progress notification requests are executed on the same Watch RPC as their watch, which is required for it to work. 
+ contextMetadata = metadata.New(map[string]string{"source": "cache"}) + } + + progressRequester := newConditionalProgressRequester(config.Storage.RequestWatchProgress, config.Clock, contextMetadata) watchCache := newWatchCache( config.KeyFunc, cacher.processEvent, config.GetAttrsFunc, config.Versioner, config.Indexers, config.Clock, config.GroupResource, progressRequester) - listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc) + listerWatcher := NewListerWatcher(config.Storage, config.ResourcePrefix, config.NewListFunc, contextMetadata) reflectorName := "storage/cacher.go:" + config.ResourcePrefix reflector := cache.NewNamedReflector(reflectorName, listerWatcher, obj, watchCache, 0) @@ -517,7 +526,7 @@ func (c *Cacher) Watch(ctx context.Context, key string, opts storage.ListOptions if !utilfeature.DefaultFeatureGate.Enabled(features.WatchList) && opts.SendInitialEvents != nil { opts.SendInitialEvents = nil } - if opts.SendInitialEvents == nil && opts.ResourceVersion == "" { + if utilfeature.DefaultFeatureGate.Enabled(features.WatchFromStorageWithoutResourceVersion) && opts.SendInitialEvents == nil && opts.ResourceVersion == "" { return c.storage.Watch(ctx, key, opts) } requestedWatchRV, err := c.versioner.ParseResourceVersion(opts.ResourceVersion) diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go index 1252e5e34..2817a93dd 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/lister_watcher.go @@ -19,6 +19,8 @@ package cacher import ( "context" + "google.golang.org/grpc/metadata" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" @@ -30,17 +32,19 @@ import ( // listerWatcher opaques storage.Interface to expose cache.ListerWatcher. type listerWatcher struct { - storage storage.Interface - resourcePrefix string - newListFunc func() runtime.Object + storage storage.Interface + resourcePrefix string + newListFunc func() runtime.Object + contextMetadata metadata.MD } // NewListerWatcher returns a storage.Interface backed ListerWatcher. 
-func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object) cache.ListerWatcher { +func NewListerWatcher(storage storage.Interface, resourcePrefix string, newListFunc func() runtime.Object, contextMetadata metadata.MD) cache.ListerWatcher { return &listerWatcher{ - storage: storage, - resourcePrefix: resourcePrefix, - newListFunc: newListFunc, + storage: storage, + resourcePrefix: resourcePrefix, + newListFunc: newListFunc, + contextMetadata: contextMetadata, } } @@ -59,7 +63,11 @@ func (lw *listerWatcher) List(options metav1.ListOptions) (runtime.Object, error Predicate: pred, Recursive: true, } - if err := lw.storage.GetList(context.TODO(), lw.resourcePrefix, storageOpts, list); err != nil { + ctx := context.Background() + if lw.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata) + } + if err := lw.storage.GetList(ctx, lw.resourcePrefix, storageOpts, list); err != nil { return nil, err } return list, nil @@ -73,5 +81,9 @@ func (lw *listerWatcher) Watch(options metav1.ListOptions) (watch.Interface, err Recursive: true, ProgressNotify: true, } - return lw.storage.Watch(context.TODO(), lw.resourcePrefix, opts) + ctx := context.Background() + if lw.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, lw.contextMetadata) + } + return lw.storage.Watch(ctx, lw.resourcePrefix, opts) } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go index c26eb55da..c27ca053b 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_cache.go @@ -492,8 +492,7 @@ func (s sortableStoreElements) Swap(i, j int) { // WaitUntilFreshAndList returns list of pointers to `storeElement` objects along // with their ResourceVersion and the name of the index, if any, that was used. -func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) ([]interface{}, uint64, string, error) { - var err error +func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion uint64, matchValues []storage.MatchValue) (result []interface{}, rv uint64, index string, err error) { if utilfeature.DefaultFeatureGate.Enabled(features.ConsistentListFromCache) && w.notFresh(resourceVersion) { w.waitingUntilFresh.Add() err = w.waitUntilFreshAndBlock(ctx, resourceVersion) @@ -501,12 +500,14 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion } else { err = w.waitUntilFreshAndBlock(ctx, resourceVersion) } + + defer func() { sort.Sort(sortableStoreElements(result)) }() defer w.RUnlock() if err != nil { - return nil, 0, "", err + return result, rv, index, err } - result, rv, index, err := func() ([]interface{}, uint64, string, error) { + result, rv, index, err = func() ([]interface{}, uint64, string, error) { // This isn't the place where we do "final filtering" - only some "prefiltering" is happening here. So the only // requirement here is to NOT miss anything that should be returned. We can return as many non-matching items as we // want - they will be filtered out later. The fact that we return less things is only further performance improvement. 
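The `source: cache` label threaded through the cacher above is plain gRPC request metadata: once attached to a context via `metadata.NewOutgoingContext`, every storage call made with that context carries it, which is what lets the etcd client keep the cacher's watch (and the progress-notify requests that must share its stream) on a dedicated Watch RPC. A minimal standalone sketch of the mechanism, not part of the patch:

```go
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

func main() {
	// The same metadata the cacher builds when SeparateCacheWatchRPC is enabled.
	md := metadata.New(map[string]string{"source": "cache"})
	ctx := metadata.NewOutgoingContext(context.Background(), md)

	// The gRPC transport reads the outgoing metadata when it sends an RPC,
	// so any List/Watch issued with ctx is tagged source=cache.
	if got, ok := metadata.FromOutgoingContext(ctx); ok {
		fmt.Println(got.Get("source")) // [cache]
	}
}
```
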
@@ -519,7 +520,6 @@ func (w *watchCache) WaitUntilFreshAndList(ctx context.Context, resourceVersion return w.store.List(), w.resourceVersion, "", nil }() - sort.Sort(sortableStoreElements(result)) return result, rv, index, err } diff --git a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go index f44ca9325..13f50bc18 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go +++ b/vendor/k8s.io/apiserver/pkg/storage/cacher/watch_progress.go @@ -21,6 +21,8 @@ import ( "sync" "time" + "google.golang.org/grpc/metadata" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" @@ -34,10 +36,11 @@ const ( progressRequestPeriod = 100 * time.Millisecond ) -func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory) *conditionalProgressRequester { +func newConditionalProgressRequester(requestWatchProgress WatchProgressRequester, clock TickerFactory, contextMetadata metadata.MD) *conditionalProgressRequester { pr := &conditionalProgressRequester{ clock: clock, requestWatchProgress: requestWatchProgress, + contextMetadata: contextMetadata, } pr.cond = sync.NewCond(pr.mux.RLocker()) return pr @@ -54,6 +57,7 @@ type TickerFactory interface { type conditionalProgressRequester struct { clock TickerFactory requestWatchProgress WatchProgressRequester + contextMetadata metadata.MD mux sync.RWMutex cond *sync.Cond @@ -63,6 +67,9 @@ type conditionalProgressRequester struct { func (pr *conditionalProgressRequester) Run(stopCh <-chan struct{}) { ctx := wait.ContextForChannel(stopCh) + if pr.contextMetadata != nil { + ctx = metadata.NewOutgoingContext(ctx, pr.contextMetadata) + } go func() { defer utilruntime.HandleCrash() <-stopCh diff --git a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go index ac023d55d..e3147323d 100644 --- a/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/storage/etcd3/metrics/metrics.go @@ -84,7 +84,7 @@ var ( }, []string{"endpoint"}, ) - storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"cluster"}, nil, compbasemetrics.ALPHA, "") + storageSizeDescription = compbasemetrics.NewDesc("apiserver_storage_size_bytes", "Size of the storage database file physically allocated in bytes.", []string{"storage_cluster_id"}, nil, compbasemetrics.ALPHA, "") storageMonitor = &monitorCollector{monitorGetter: func() ([]Monitor, error) { return nil, nil }} etcdEventsReceivedCounts = compbasemetrics.NewCounterVec( &compbasemetrics.CounterOpts{ @@ -274,21 +274,21 @@ func (c *monitorCollector) CollectWithStability(ch chan<- compbasemetrics.Metric } for i, m := range monitors { - cluster := fmt.Sprintf("etcd-%d", i) + storageClusterID := fmt.Sprintf("etcd-%d", i) - klog.V(4).InfoS("Start collecting storage metrics", "cluster", cluster) + klog.V(4).InfoS("Start collecting storage metrics", "storage_cluster_id", storageClusterID) ctx, cancel := context.WithTimeout(context.Background(), time.Second) metrics, err := m.Monitor(ctx) cancel() m.Close() if err != nil { - klog.InfoS("Failed to get storage metrics", "cluster", cluster, "err", err) + klog.InfoS("Failed to get storage metrics", "storage_cluster_id", storageClusterID, "err", err) continue } - metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, 
compbasemetrics.GaugeValue, float64(metrics.Size), cluster) + metric, err := compbasemetrics.NewConstMetric(storageSizeDescription, compbasemetrics.GaugeValue, float64(metrics.Size), storageClusterID) if err != nil { - klog.ErrorS(err, "Failed to create metric", "cluster", cluster) + klog.ErrorS(err, "Failed to create metric", "storage_cluster_id", storageClusterID) } ch <- metric } diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go index 708bf2cde..8c90811bf 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_controller.go @@ -150,9 +150,6 @@ type configController struct { // from server configuration. serverConcurrencyLimit int - // requestWaitLimit comes from server configuration. - requestWaitLimit time.Duration - // watchTracker implements the necessary WatchTracker interface. WatchTracker @@ -263,9 +260,15 @@ type seatDemandStats struct { } func (stats *seatDemandStats) update(obs fq.IntegratorResults) { + stats.highWatermark = obs.Max + if obs.Duration <= 0 { + return + } + if math.IsNaN(obs.Deviation) { + obs.Deviation = 0 + } stats.avg = obs.Average stats.stdDev = obs.Deviation - stats.highWatermark = obs.Max envelope := obs.Average + obs.Deviation stats.smoothed = math.Max(envelope, seatDemandSmoothingCoefficient*stats.smoothed+(1-seatDemandSmoothingCoefficient)*envelope) } @@ -281,13 +284,12 @@ func newTestableController(config TestableConfig) *configController { asFieldManager: config.AsFieldManager, foundToDangling: config.FoundToDangling, serverConcurrencyLimit: config.ServerConcurrencyLimit, - requestWaitLimit: config.RequestWaitLimit, flowcontrolClient: config.FlowcontrolClient, priorityLevelStates: make(map[string]*priorityLevelState), WatchTracker: NewWatchTracker(), MaxSeatsTracker: NewMaxSeatsTracker(), } - klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, requestWaitLimit=%s, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.requestWaitLimit, cfgCtlr.name, cfgCtlr.asFieldManager) + klog.V(2).Infof("NewTestableController %q with serverConcurrencyLimit=%d, name=%s, asFieldManager=%q", cfgCtlr.name, cfgCtlr.serverConcurrencyLimit, cfgCtlr.name, cfgCtlr.asFieldManager) // Start with longish delay because conflicts will be between // different processes, so take some time to go away. cfgCtlr.configQueue = workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(200*time.Millisecond, 8*time.Hour), "priority_and_fairness_config_queue") @@ -427,7 +429,7 @@ func (cfgCtlr *configController) updateBorrowingLocked(setCompleters bool, plSta plState := plStates[plName] if setCompleters { qsCompleter, err := queueSetCompleterForPL(cfgCtlr.queueSetFactory, plState.queues, - plState.pl, cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs, + plState.pl, plState.reqsGaugePair, plState.execSeatsObs, metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge)) if err != nil { klog.ErrorS(err, "Inconceivable! 
Configuration error in existing priority level", "pl", plState.pl) @@ -651,10 +653,10 @@ func (cfgCtlr *configController) lockAndDigestConfigObjects(newPLs []*flowcontro // Supply missing mandatory PriorityLevelConfiguration objects if !meal.haveExemptPL { - meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt, cfgCtlr.requestWaitLimit) + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationExempt) } if !meal.haveCatchAllPL { - meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll, cfgCtlr.requestWaitLimit) + meal.imaginePL(fcboot.MandatoryPriorityLevelConfigurationCatchAll) } meal.finishQueueSetReconfigsLocked() @@ -686,7 +688,7 @@ func (meal *cfgMeal) digestNewPLsLocked(newPLs []*flowcontrol.PriorityLevelConfi } } qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, state.queues, - pl, meal.cfgCtlr.requestWaitLimit, state.reqsGaugePair, state.execSeatsObs, + pl, state.reqsGaugePair, state.execSeatsObs, metrics.NewUnionGauge(state.seatDemandIntegrator, state.seatDemandRatioedGauge)) if err != nil { klog.Warningf("Ignoring PriorityLevelConfiguration object %s because its spec (%s) is broken: %s", pl.Name, fcfmt.Fmt(pl.Spec), err) @@ -792,7 +794,7 @@ func (meal *cfgMeal) processOldPLsLocked() { } var err error plState.qsCompleter, err = queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, plState.queues, - plState.pl, meal.cfgCtlr.requestWaitLimit, plState.reqsGaugePair, plState.execSeatsObs, + plState.pl, plState.reqsGaugePair, plState.execSeatsObs, metrics.NewUnionGauge(plState.seatDemandIntegrator, plState.seatDemandRatioedGauge)) if err != nil { // This can not happen because queueSetCompleterForPL already approved this config @@ -874,7 +876,7 @@ func (meal *cfgMeal) finishQueueSetReconfigsLocked() { // queueSetCompleterForPL returns an appropriate QueueSetCompleter for the // given priority level configuration. Returns nil and an error if the given // object is malformed in a way that is a problem for this package. -func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) { +func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flowcontrol.PriorityLevelConfiguration, reqsIntPair metrics.RatioedGaugePair, execSeatsObs metrics.RatioedGauge, seatDemandGauge metrics.Gauge) (fq.QueueSetCompleter, error) { if (pl.Spec.Type == flowcontrol.PriorityLevelEnablementLimited) != (pl.Spec.Limited != nil) { return nil, errors.New("broken union structure at the top, for Limited") } @@ -896,7 +898,6 @@ func queueSetCompleterForPL(qsf fq.QueueSetFactory, queues fq.QueueSet, pl *flow DesiredNumQueues: int(qcAPI.Queues), QueueLengthLimit: int(qcAPI.QueueLengthLimit), HandSize: int(qcAPI.HandSize), - RequestWaitLimit: requestWaitLimit, } } } else { @@ -950,16 +951,15 @@ func (meal *cfgMeal) presyncFlowSchemaStatus(fs *flowcontrol.FlowSchema, isDangl // imaginePL adds a priority level based on one of the mandatory ones // that does not actually exist (right now) as a real API object. 
-func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration, requestWaitLimit time.Duration) { +func (meal *cfgMeal) imaginePL(proto *flowcontrol.PriorityLevelConfiguration) { klog.V(3).Infof("No %s PriorityLevelConfiguration found, imagining one", proto.Name) labelValues := []string{proto.Name} reqsGaugePair := metrics.RatioedGaugeVecPhasedElementPair(meal.cfgCtlr.reqsGaugeVec, 1, 1, labelValues) execSeatsObs := meal.cfgCtlr.execSeatsGaugeVec.NewForLabelValuesSafe(0, 1, labelValues) seatDemandIntegrator := fq.NewNamedIntegrator(meal.cfgCtlr.clock, proto.Name) seatDemandRatioedGauge := metrics.ApiserverSeatDemands.NewForLabelValuesSafe(0, 1, []string{proto.Name}) - qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, - requestWaitLimit, reqsGaugePair, execSeatsObs, - metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge)) + qsCompleter, err := queueSetCompleterForPL(meal.cfgCtlr.queueSetFactory, nil, proto, reqsGaugePair, + execSeatsObs, metrics.NewUnionGauge(seatDemandIntegrator, seatDemandRatioedGauge)) if err != nil { // This can not happen because proto is one of the mandatory // objects and these are not erroneous diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go index 76782623a..05f4f5e53 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/apf_filter.go @@ -90,7 +90,6 @@ func New( informerFactory kubeinformers.SharedInformerFactory, flowcontrolClient flowcontrolclient.FlowcontrolV1beta3Interface, serverConcurrencyLimit int, - requestWaitLimit time.Duration, ) Interface { clk := eventclock.Real{} return NewTestable(TestableConfig{ @@ -101,7 +100,6 @@ func New( InformerFactory: informerFactory, FlowcontrolClient: flowcontrolClient, ServerConcurrencyLimit: serverConcurrencyLimit, - RequestWaitLimit: requestWaitLimit, ReqsGaugeVec: metrics.PriorityLevelConcurrencyGaugeVec, ExecSeatsGaugeVec: metrics.PriorityLevelExecutionSeatsGaugeVec, QueueSetFactory: fqs.NewQueueSetFactory(clk), @@ -139,9 +137,6 @@ type TestableConfig struct { // ServerConcurrencyLimit for the controller to enforce ServerConcurrencyLimit int - // RequestWaitLimit configured on the server - RequestWaitLimit time.Duration - // GaugeVec for metrics about requests, broken down by phase and priority_level ReqsGaugeVec metrics.RatioedGaugeVec diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go index 013fd41e0..3b0ad1638 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/interface.go @@ -18,7 +18,6 @@ package fairqueuing import ( "context" - "time" "k8s.io/apiserver/pkg/util/flowcontrol/debug" "k8s.io/apiserver/pkg/util/flowcontrol/metrics" @@ -117,7 +116,7 @@ type QueuingConfig struct { // DesiredNumQueues is the number of queues that the API says // should exist now. This may be non-positive, in which case - // QueueLengthLimit, HandSize, and RequestWaitLimit are ignored. + // QueueLengthLimit, and HandSize are ignored. // A value of zero means to respect the ConcurrencyLimit of the DispatchingConfig. // A negative value means to always dispatch immediately upon arrival // (i.e., the requests are "exempt" from limitation). 
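With `RequestWaitLimit` removed from `QueuingConfig`, the queue-wait bound is now enforced purely through the context deadline computed by `getRequestWaitContext` shown earlier, seeded from `c.RequestTimeout/4` in `DefaultBuildHandlerChain`. A self-contained restatement of that rule, with a simplified function name and signature of my own choosing:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// waitLimit restates the bound from getRequestWaitContext: a quarter of the
// request's remaining deadline when it has one, otherwise the server-wide
// default, capped at one minute either way.
func waitLimit(ctx context.Context, arrivedAt time.Time, defaultLimit time.Duration) time.Duration {
	limit := defaultLimit
	if deadline, ok := ctx.Deadline(); ok {
		limit = deadline.Sub(arrivedAt) / 4
	}
	if limit > time.Minute {
		limit = time.Minute
	}
	return limit
}

func main() {
	arrived := time.Now()
	ctx, cancel := context.WithDeadline(context.Background(), arrived.Add(time.Minute))
	defer cancel()

	fmt.Println(waitLimit(ctx, arrived, 15*time.Second))                  // 15s: 60s deadline / 4
	fmt.Println(waitLimit(context.Background(), arrived, 15*time.Second)) // 15s: no deadline, use the default
	fmt.Println(waitLimit(context.Background(), arrived, 10*time.Minute)) // 1m0s: capped at the hard limit
}
```
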
@@ -129,10 +128,6 @@ type QueuingConfig struct { // HandSize is a parameter of shuffle sharding. Upon arrival of a request, a queue is chosen by randomly // dealing a "hand" of this many queues and then picking one of minimum length. HandSize int - - // RequestWaitLimit is the maximum amount of time that a request may wait in a queue. - // If, by the end of that time, the request has not been dispatched then it is rejected. - RequestWaitLimit time.Duration } // DispatchingConfig defines the configuration of the dispatching aspect of a QueueSet. diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go index aa54a9ccf..604e0862a 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/fairqueuing/queueset/queueset.go @@ -272,7 +272,6 @@ func (qs *queueSet) setConfiguration(ctx context.Context, qCfg fq.QueuingConfig, } else { qCfg.QueueLengthLimit = qs.qCfg.QueueLengthLimit qCfg.HandSize = qs.qCfg.HandSize - qCfg.RequestWaitLimit = qs.qCfg.RequestWaitLimit } qs.qCfg = qCfg @@ -300,9 +299,6 @@ const ( // Serve this one decisionExecute requestDecision = iota - // Reject this one due to APF queuing considerations - decisionReject - // This one's context timed out / was canceled decisionCancel ) @@ -337,11 +333,10 @@ func (qs *queueSet) StartRequest(ctx context.Context, workEstimate *fqrequest.Wo // ======================================================================== // Step 1: // 1) Start with shuffle sharding, to pick a queue. - // 2) Reject old requests that have been waiting too long - // 3) Reject current request if there is not enough concurrency shares and + // 2) Reject current request if there is not enough concurrency shares and // we are at max queue length - // 4) If not rejected, create a request and enqueue - req = qs.timeoutOldRequestsAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) + // 3) If not rejected, create a request and enqueue + req = qs.shuffleShardAndRejectOrEnqueueLocked(ctx, workEstimate, hashValue, flowDistinguisher, fsName, descr1, descr2, queueNoteFn) // req == nil means that the request was rejected - no remaining // concurrency shares and at max queue length already if req == nil { @@ -422,13 +417,7 @@ func (req *request) wait() (bool, bool) { } req.waitStarted = true switch decisionAny { - case decisionReject: - klog.V(5).Infof("QS(%s): request %#+v %#+v timed out after being enqueued\n", qs.qCfg.Name, req.descr1, req.descr2) - qs.totRequestsRejected++ - qs.totRequestsTimedout++ - metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out") - return false, qs.isIdleLocked() - case decisionCancel: + case decisionCancel: // handle in code following this switch case decisionExecute: klog.V(5).Infof("QS(%s): Dispatching request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) return true, false @@ -438,7 +427,7 @@ func (req *request) wait() (bool, bool) { } // TODO(aaron-prindle) add metrics for this case klog.V(5).Infof("QS(%s): Ejecting request %#+v %#+v from its queue", qs.qCfg.Name, req.descr1, req.descr2) - // remove the request from the queue as it has timed out + // remove the request from the queue as its queue wait time has exceeded queue := req.queue if req.removeFromQueueLocked() != nil { defer qs.boundNextDispatchLocked(queue) @@ -446,8 +435,9 @@ func (req *request) wait() 
(bool, bool) { qs.totSeatsWaiting -= req.MaxSeats() qs.totRequestsRejected++ qs.totRequestsCancelled++ - metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "cancelled") + metrics.AddReject(req.ctx, qs.qCfg.Name, req.fsName, "time-out") metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) + metrics.AddSeatsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -req.MaxSeats()) req.NoteQueued(false) qs.reqsGaugePair.RequestsWaiting.Add(-1) qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting)) @@ -555,25 +545,19 @@ func (qs *queueSet) getVirtualTimeRatioLocked() float64 { return math.Min(float64(seatsRequested), float64(qs.dCfg.ConcurrencyLimit)) / float64(activeQueues) } -// timeoutOldRequestsAndRejectOrEnqueueLocked encapsulates the logic required +// shuffleShardAndRejectOrEnqueueLocked encapsulates the logic required // to validate and enqueue a request for the queueSet/QueueSet: // 1) Start with shuffle sharding, to pick a queue. -// 2) Reject old requests that have been waiting too long -// 3) Reject current request if there is not enough concurrency shares and +// 2) Reject current request if there is not enough concurrency shares and // we are at max queue length -// 4) If not rejected, create a request and enqueue +// 3) If not rejected, create a request and enqueue // returns the enqueud request on a successful enqueue // returns nil in the case that there is no available concurrency or // the queuelengthlimit has been reached -func (qs *queueSet) timeoutOldRequestsAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { +func (qs *queueSet) shuffleShardAndRejectOrEnqueueLocked(ctx context.Context, workEstimate *fqrequest.WorkEstimate, hashValue uint64, flowDistinguisher, fsName string, descr1, descr2 interface{}, queueNoteFn fq.QueueNoteFn) *request { // Start with the shuffle sharding, to pick a queue. queueIdx := qs.shuffleShardLocked(hashValue, descr1, descr2) queue := qs.queues[queueIdx] - // The next step is the logic to reject requests that have been waiting too long - qs.removeTimedOutRequestsFromQueueToBoundLocked(queue, fsName) - // NOTE: currently timeout is only checked for each new request. This means that there can be - // requests that are in the queue longer than the timeout if there are no new requests - // We prefer the simplicity over the promptness, at least for now. 
defer qs.boundNextDispatchLocked(queue) @@ -632,43 +616,6 @@ func (qs *queueSet) shuffleShardLocked(hashValue uint64, descr1, descr2 interfac return bestQueueIdx } -// removeTimedOutRequestsFromQueueToBoundLocked rejects old requests that have been enqueued -// past the requestWaitLimit -func (qs *queueSet) removeTimedOutRequestsFromQueueToBoundLocked(queue *queue, fsName string) { - timeoutCount := 0 - disqueueSeats := 0 - now := qs.clock.Now() - reqs := queue.requestsWaiting - // reqs are sorted oldest -> newest - // can short circuit loop (break) if oldest requests are not timing out - // as newer requests also will not have timed out - - // now - requestWaitLimit = arrivalLimit - arrivalLimit := now.Add(-qs.qCfg.RequestWaitLimit) - reqs.Walk(func(req *request) bool { - if arrivalLimit.After(req.arrivalTime) { - if req.decision.Set(decisionReject) && req.removeFromQueueLocked() != nil { - timeoutCount++ - disqueueSeats += req.MaxSeats() - req.NoteQueued(false) - metrics.AddRequestsInQueues(req.ctx, qs.qCfg.Name, req.fsName, -1) - } - // we need to check if the next request has timed out. - return true - } - // since reqs are sorted oldest -> newest, we are done here. - return false - }) - - // remove timed out requests from queue - if timeoutCount > 0 { - qs.totRequestsWaiting -= timeoutCount - qs.totSeatsWaiting -= disqueueSeats - qs.reqsGaugePair.RequestsWaiting.Add(float64(-timeoutCount)) - qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting)) - } -} - // rejectOrEnqueueToBoundLocked rejects or enqueues the newly arrived // request, which has been assigned to a queue. If up against the // queue length limit and the concurrency limit then returns false. @@ -702,6 +649,7 @@ func (qs *queueSet) enqueueToBoundLocked(request *request) { qs.totRequestsWaiting++ qs.totSeatsWaiting += request.MaxSeats() metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, 1) + metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, request.MaxSeats()) request.NoteQueued(true) qs.reqsGaugePair.RequestsWaiting.Add(1) qs.seatDemandIntegrator.Set(float64(qs.totSeatsInUse + qs.totSeatsWaiting)) @@ -760,6 +708,7 @@ func (qs *queueSet) dispatchLocked() bool { qs.totRequestsWaiting-- qs.totSeatsWaiting -= request.MaxSeats() metrics.AddRequestsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -1) + metrics.AddSeatsInQueues(request.ctx, qs.qCfg.Name, request.fsName, -request.MaxSeats()) request.NoteQueued(false) qs.reqsGaugePair.RequestsWaiting.Add(-1) defer qs.boundNextDispatchLocked(queue) diff --git a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go index 54af4415c..9fe7b15a0 100644 --- a/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go +++ b/vendor/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go @@ -210,6 +210,16 @@ var ( }, []string{priorityLevel, flowSchema}, ) + apiserverCurrentInqueueSeats = compbasemetrics.NewGaugeVec( + &compbasemetrics.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "current_inqueue_seats", + Help: "Number of seats currently pending in queues of the API Priority and Fairness subsystem", + StabilityLevel: compbasemetrics.ALPHA, + }, + []string{priorityLevel, flowSchema}, + ) apiserverRequestQueueLength = compbasemetrics.NewHistogramVec( &compbasemetrics.HistogramOpts{ Namespace: namespace, @@ -455,6 +465,7 @@ var ( apiserverNextSBounds, apiserverNextDiscountedSBounds, apiserverCurrentInqueueRequests, + 
apiserverCurrentInqueueSeats, apiserverRequestQueueLength, apiserverRequestConcurrencyLimit, apiserverRequestConcurrencyInUse, @@ -518,6 +529,11 @@ func AddRequestsInQueues(ctx context.Context, priorityLevel, flowSchema string, apiserverCurrentInqueueRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) } +// AddSeatsInQueues adds the given delta to the gauge of the # of seats in the queues of the specified flowSchema and priorityLevel +func AddSeatsInQueues(ctx context.Context, priorityLevel, flowSchema string, delta int) { + apiserverCurrentInqueueSeats.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) +} + // AddRequestsExecuting adds the given delta to the gauge of executing requests of the given flowSchema and priorityLevel func AddRequestsExecuting(ctx context.Context, priorityLevel, flowSchema string, delta int) { apiserverCurrentExecutingRequests.WithLabelValues(priorityLevel, flowSchema).Add(float64(delta)) diff --git a/vendor/k8s.io/cloud-provider/cloud.go b/vendor/k8s.io/cloud-provider/cloud.go index c9a04085f..482656b37 100644 --- a/vendor/k8s.io/cloud-provider/cloud.go +++ b/vendor/k8s.io/cloud-provider/cloud.go @@ -98,6 +98,8 @@ func DefaultLoadBalancerName(service *v1.Service) string { } // GetInstanceProviderID builds a ProviderID for a node in a cloud. +// Note that if the instance does not exist, we must return ("", cloudprovider.InstanceNotFound) +// cloudprovider.InstanceNotFound should NOT be returned for instances that exist but are stopped/sleeping func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types.NodeName) (string, error) { instances, ok := cloud.Instances() if !ok { @@ -108,8 +110,11 @@ func GetInstanceProviderID(ctx context.Context, cloud Interface, nodeName types. if err == NotImplemented { return "", err } + if err == InstanceNotFound { + return "", err + } - return "", fmt.Errorf("failed to get instance ID from cloud provider: %v", err) + return "", fmt.Errorf("failed to get instance ID from cloud provider: %w", err) } return cloud.ProviderName() + "://" + instanceID, nil } diff --git a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go index 7907dfad1..fbfb3f5ea 100644 --- a/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go +++ b/vendor/k8s.io/component-base/metrics/prometheus/slis/metrics.go @@ -57,6 +57,7 @@ var ( func Register(registry k8smetrics.KubeRegistry) { registry.Register(healthcheck) registry.Register(healthchecksTotal) + _ = k8smetrics.RegisterProcessStartTime(registry.Register) } func ResetHealthMetrics() { diff --git a/vendor/k8s.io/component-helpers/storage/volume/helpers.go b/vendor/k8s.io/component-helpers/storage/volume/helpers.go index 7ec376f34..5068c6546 100644 --- a/vendor/k8s.io/component-helpers/storage/volume/helpers.go +++ b/vendor/k8s.io/component-helpers/storage/volume/helpers.go @@ -24,6 +24,20 @@ import ( "k8s.io/component-helpers/scheduling/corev1" ) +// PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. +func PersistentVolumeClaimHasClass(claim *v1.PersistentVolumeClaim) bool { + // Use beta annotation first + if _, found := claim.Annotations[v1.BetaStorageClassAnnotation]; found { + return true + } + + if claim.Spec.StorageClassName != nil { + return true + } + + return false +} + // GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was // requested, it returns "". 
func GetPersistentVolumeClaimClass(claim *v1.PersistentVolumeClaim) string { diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index cd9cbbb8b..a15a3ce97 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -4940,6 +4940,46 @@ func ValidateContainerStateTransition(newStatuses, oldStatuses []core.ContainerS return allErrs } +// ValidateInitContainerStateTransition test to if any illegal init container state transitions are being attempted +func ValidateInitContainerStateTransition(newStatuses, oldStatuses []core.ContainerStatus, fldpath *field.Path, podSpec *core.PodSpec) field.ErrorList { + allErrs := field.ErrorList{} + // If we should always restart, containers are allowed to leave the terminated state + if podSpec.RestartPolicy == core.RestartPolicyAlways { + return allErrs + } + for i, oldStatus := range oldStatuses { + // Skip any container that is not terminated + if oldStatus.State.Terminated == nil { + continue + } + // Skip any container that failed but is allowed to restart + if oldStatus.State.Terminated.ExitCode != 0 && podSpec.RestartPolicy == core.RestartPolicyOnFailure { + continue + } + + // Skip any restartable init container that is allowed to restart + isRestartableInitContainer := false + for _, c := range podSpec.InitContainers { + if oldStatus.Name == c.Name { + if c.RestartPolicy != nil && *c.RestartPolicy == core.ContainerRestartPolicyAlways { + isRestartableInitContainer = true + } + break + } + } + if isRestartableInitContainer { + continue + } + + for _, newStatus := range newStatuses { + if oldStatus.Name == newStatus.Name && newStatus.State.Terminated == nil { + allErrs = append(allErrs, field.Forbidden(fldpath.Index(i).Child("state"), "may not be transitioned to non-terminated state")) + } + } + } + return allErrs +} + // ValidatePodStatusUpdate checks for changes to status that shouldn't occur in normal operation. func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions) field.ErrorList { fldPath := field.NewPath("metadata") @@ -4961,7 +5001,7 @@ func ValidatePodStatusUpdate(newPod, oldPod *core.Pod, opts PodValidationOptions // If pod should not restart, make sure the status update does not transition // any terminated containers to a non-terminated state. allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.ContainerStatuses, oldPod.Status.ContainerStatuses, fldPath.Child("containerStatuses"), oldPod.Spec.RestartPolicy)...) - allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), oldPod.Spec.RestartPolicy)...) + allErrs = append(allErrs, ValidateInitContainerStateTransition(newPod.Status.InitContainerStatuses, oldPod.Status.InitContainerStatuses, fldPath.Child("initContainerStatuses"), &oldPod.Spec)...) // The kubelet will never restart ephemeral containers, so treat them like they have an implicit RestartPolicyNever. allErrs = append(allErrs, ValidateContainerStateTransition(newPod.Status.EphemeralContainerStatuses, oldPod.Status.EphemeralContainerStatuses, fldPath.Child("ephemeralContainerStatuses"), core.RestartPolicyNever)...) allErrs = append(allErrs, validatePodResourceClaimStatuses(newPod.Status.ResourceClaimStatuses, newPod.Spec.ResourceClaims, fldPath.Child("resourceClaimStatuses"))...) 
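Returning to the component-helpers change above: a usage sketch for the new `PersistentVolumeClaimHasClass` helper, assuming the vendored `k8s.io` modules from this change are on the module path. A claim counts as having a class if either the legacy beta annotation or `Spec.StorageClassName` is set, even when the value is the empty string.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagehelpers "k8s.io/component-helpers/storage/volume"
)

func main() {
	class := "fast"

	// Class requested through the spec field.
	bySpec := &v1.PersistentVolumeClaim{
		Spec: v1.PersistentVolumeClaimSpec{StorageClassName: &class},
	}

	// Class requested through the legacy beta annotation, which the
	// helper checks first.
	byAnnotation := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Annotations: map[string]string{v1.BetaStorageClassAnnotation: class},
		},
	}

	fmt.Println(storagehelpers.PersistentVolumeClaimHasClass(bySpec))       // true
	fmt.Println(storagehelpers.PersistentVolumeClaimHasClass(byAnnotation)) // true

	// Neither the annotation nor the spec field is set.
	fmt.Println(storagehelpers.PersistentVolumeClaimHasClass(&v1.PersistentVolumeClaim{})) // false
}
```
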
diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 56791b47c..3cca890ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -250,7 +250,6 @@ const ( // owner: @harche // kep: http://kep.k8s.io/3386 // alpha: v1.25 - // beta: v1.27 // // Allows using event-driven PLEG (pod lifecycle event generator) through kubelet // which avoids frequent relisting of containers which helps optimize performance. @@ -1005,7 +1004,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha}, - EventedPLEG: {Default: false, PreRelease: featuregate.Beta}, // off by default, requires CRI Runtime support + EventedPLEG: {Default: false, PreRelease: featuregate.Alpha}, ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update @@ -1137,7 +1136,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS ElasticIndexedJob: {Default: true, PreRelease: featuregate.Beta}, - SchedulerQueueingHints: {Default: true, PreRelease: featuregate.Beta}, + SchedulerQueueingHints: {Default: false, PreRelease: featuregate.Beta}, SeccompDefault: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 @@ -1206,12 +1205,16 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 + genericfeatures.SeparateCacheWatchRPC: {Default: true, PreRelease: featuregate.Beta}, + genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 genericfeatures.UnauthenticatedHTTP2DOSMitigation: {Default: false, PreRelease: featuregate.Beta}, + genericfeatures.WatchFromStorageWithoutResourceVersion: {Default: false, PreRelease: featuregate.Beta}, + // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: diff --git a/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go b/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go index 5895df0c7..28771b6df 100644 --- a/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go +++ b/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go @@ -188,7 +188,7 @@ func AddNoNewPrivileges(sc *v1.SecurityContext) bool { var ( // These *must* be kept in sync with moby/moby. - // https://github.com/moby/moby/blob/master/oci/defaults.go#L105-L123 + // https://github.com/moby/moby/blob/master/oci/defaults.go#L105-L124 // @jessfraz will watch changes to those files upstream. 
defaultMaskedPaths = []string{ "/proc/asound", @@ -201,6 +201,7 @@ var ( "/proc/sched_debug", "/proc/scsi", "/sys/firmware", + "/sys/devices/virtual/powercap", } defaultReadonlyPaths = []string{ "/proc/bus", diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go b/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go index af309353b..238e919b3 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/types/types.go @@ -102,6 +102,26 @@ func IsFailedPreconditionError(err error) bool { return errors.As(err, &failedPreconditionError) } +// OperationNotSupported indicates that a volume operation is not supported for the given volume or plugin. +type OperationNotSupported struct { + msg string +} + +func (err *OperationNotSupported) Error() string { + return err.msg +} + +// NewOperationNotSupportedError returns a new OperationNotSupported with the given message. +func NewOperationNotSupportedError(msg string) *OperationNotSupported { + return &OperationNotSupported{msg: msg} +} + +// IsOperationNotSupportedError returns true if err (or any error it wraps) is an OperationNotSupported. +func IsOperationNotSupportedError(err error) bool { + var operationNotSupportedError *OperationNotSupported + return errors.As(err, &operationNotSupportedError) +} + // TransientOperationFailure indicates operation failed with a transient error // and may fix itself when retried. type TransientOperationFailure struct { diff --git a/vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go b/vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go index c3fb6b369..ac5707721 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go +++ b/vendor/k8s.io/kubernetes/test/e2e/storage/utils/create.go @@ -62,6 +62,10 @@ func LoadFromManifests(files ...string) ([]interface{}, error) { if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), data, &what); err != nil { return fmt.Errorf("decode TypeMeta: %w", err) } + // Ignore empty documents. + if what.Kind == "" { + return nil + } factory := factories[what] if factory == nil { diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml index e47e6fed2..3d33dfc06 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v3.4.0/deploy/kubernetes//rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-attacher/raw/v4.5.0/deploy/kubernetes//rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml index 8f806887f..bdd93b894 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-health-monitor/external-health-monitor-controller/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-health-monitor/raw/v0.4.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from
https://github.com/kubernetes-csi/external-health-monitor/raw/v0.11.0/deploy/kubernetes/external-health-monitor-controller/rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml index ba65d9e7f..d80b5d793 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v3.1.0/deploy/kubernetes//rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-provisioner/raw/v4.0.0/deploy/kubernetes//rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external @@ -61,6 +61,13 @@ rules: - apiGroups: ["storage.k8s.io"] resources: ["volumeattachments"] verbs: ["get", "list", "watch"] + # (Alpha) Access to referencegrants is only needed when the CSI driver + # has the CrossNamespaceVolumeDataSource controller capability. + # In that case, external-provisioner requires "get", "list", "watch" + # permissions for "referencegrants" on "gateway.networking.k8s.io". + #- apiGroups: ["gateway.networking.k8s.io"] + # resources: ["referencegrants"] + # verbs: ["get", "list", "watch"] --- kind: ClusterRoleBinding @@ -89,9 +96,6 @@ metadata: rules: # Only one of the following rules for endpoints or leases is required based on # what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases. 
-- apiGroups: [""] - resources: ["endpoints"] - verbs: ["get", "watch", "list", "delete", "update", "create"] - apiGroups: ["coordination.k8s.io"] resources: ["leases"] verbs: ["get", "watch", "list", "delete", "update", "create"] diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml index 410d14ff9..4f8f7980d 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-resizer/rbac.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.4.0/deploy/kubernetes//rbac.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-resizer/raw/v1.10.0/deploy/kubernetes//rbac.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # This YAML file contains all RBAC objects that are necessary to run external @@ -46,6 +46,10 @@ rules: - apiGroups: [""] resources: ["events"] verbs: ["list", "watch", "create", "update", "patch"] + # only required if enabling the alpha volume modify feature + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattributesclasses"] + verbs: ["get", "list", "watch"] --- kind: ClusterRoleBinding @@ -63,7 +67,7 @@ roleRef: apiGroup: rbac.authorization.k8s.io --- -# Resizer must be able to work with end point in current namespace +# Resizer must be able to work with `leases` in current namespace # if (and only if) leadership election is enabled kind: Role apiVersion: rbac.authorization.k8s.io/v1 diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml index 335538d44..7b5f5ad34 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/external-snapshotter/csi-snapshotter/rbac-csi-snapshotter.yaml @@ -1,5 +1,5 @@ -# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v5.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml -# for csi-driver-host-path v1.8.0 +# Do not edit, downloaded from https://github.com/kubernetes-csi/external-snapshotter/raw/v7.0.1/deploy/kubernetes/csi-snapshotter/rbac-csi-snapshotter.yaml +# for csi-driver-host-path release-1.13 # by ./update-hostpath.sh # # Together with the RBAC file for external-provisioner, this YAML file @@ -12,6 +12,7 @@ # - optionally rename the non-namespaced ClusterRole if there # are conflicts with other deployments +--- apiVersion: v1 kind: ServiceAccount metadata: @@ -37,13 +38,24 @@ rules: - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotclasses"] verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update", "patch", "create"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents"] - verbs: ["create", "get", "list", "watch", "update", "delete", "patch"] + verbs: ["get", "list", "watch", "update", "patch", "create"] - apiGroups: ["snapshot.storage.k8s.io"] resources: ["volumesnapshotcontents/status"] verbs: 
["update", "patch"] - + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents"] + verbs: ["get", "list", "watch", "update", "patch"] + - apiGroups: ["groupsnapshot.storage.k8s.io"] + resources: ["volumegroupsnapshotcontents/status"] + verbs: ["update", "patch"] --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1 diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md index 399316db2..ce3a7694e 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/README.md @@ -1,4 +1,4 @@ The files in this directory are exact copies of "kubernetes-latest" in -https://github.com/kubernetes-csi/csi-driver-host-path/tree/v1.8.0/deploy/ +https://github.com/kubernetes-csi/csi-driver-host-path/tree/release-1.13/deploy/ Do not edit manually. Run ./update-hostpath.sh to refresh the content. diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml index c8cf666a4..0250a52c1 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-driverinfo.yaml @@ -15,3 +15,6 @@ spec: # To determine at runtime which mode a volume uses, pod info and its # "csi.storage.k8s.io/ephemeral" entry are needed. podInfoOnMount: true + # Kubernetes may use fsGroup to change permissions and ownership + # of the volume to match user requested fsGroup in the pod's SecurityPolicy + fsGroupPolicy: File diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml index 6a41a0239..eb4c16348 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-plugin.yaml @@ -1,4 +1,4 @@ -# All of the individual sidecar RBAC roles get bound + # All of the individual sidecar RBAC roles get bound # to this account. 
kind: ServiceAccount apiVersion: v1 @@ -190,6 +190,7 @@ kind: StatefulSet apiVersion: apps/v1 metadata: name: csi-hostpathplugin + namespace: default labels: app.kubernetes.io/instance: hostpath.csi.k8s.io app.kubernetes.io/part-of: csi-driver-host-path @@ -218,7 +219,7 @@ spec: serviceAccountName: csi-hostpathplugin-sa containers: - name: hostpath - image: registry.k8s.io/sig-storage/hostpathplugin:v1.11.0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 args: - "--drivername=hostpath.csi.k8s.io" - "--v=5" @@ -261,7 +262,7 @@ spec: name: dev-dir - name: csi-external-health-monitor-controller - image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.7.0 + image: registry.k8s.io/sig-storage/csi-external-health-monitor-controller:v0.11.0 args: - "--v=5" - "--csi-address=$(ADDRESS)" @@ -275,7 +276,7 @@ spec: mountPath: /csi - name: node-driver-registrar - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -303,13 +304,13 @@ spec: volumeMounts: - mountPath: /csi name: socket-dir - image: registry.k8s.io/sig-storage/livenessprobe:v2.7.0 + image: registry.k8s.io/sig-storage/livenessprobe:v2.12.0 args: - --csi-address=/csi/csi.sock - --health-port=9898 - name: csi-attacher - image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0 + image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -323,11 +324,12 @@ spec: name: socket-dir - name: csi-provisioner - image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 args: - -v=5 - --csi-address=/csi/csi.sock - --feature-gates=Topology=true + # end csi-provisioner args securityContext: # This is necessary only for systems with SELinux, where # non-privileged sidecar containers cannot access unix domain socket @@ -338,7 +340,7 @@ spec: name: socket-dir - name: csi-resizer - image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0 + image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0 args: - -v=5 - -csi-address=/csi/csi.sock @@ -352,7 +354,7 @@ spec: name: socket-dir - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1 args: - -v=5 - --csi-address=/csi/csi.sock diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml index 7253a70a9..02e5e8d7c 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-testing.yaml @@ -11,6 +11,7 @@ apiVersion: v1 kind: Service metadata: name: hostpath-service + namespace: default labels: app.kubernetes.io/instance: hostpath.csi.k8s.io app.kubernetes.io/part-of: csi-driver-host-path @@ -30,6 +31,7 @@ kind: StatefulSet apiVersion: apps/v1 metadata: name: csi-hostpath-socat + namespace: default labels: app.kubernetes.io/instance: hostpath.csi.k8s.io app.kubernetes.io/part-of: csi-driver-host-path @@ -64,7 +66,9 @@ spec: topologyKey: kubernetes.io/hostname containers: - name: socat - image: docker.io/alpine/socat:1.7.4.3-r0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 + command: + - socat args: - 
tcp-listen:10000,fork,reuseaddr - unix-connect:/csi/csi.sock diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml index 7684aea78..b17687c61 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-attacher.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-attacher - image: registry.k8s.io/sig-storage/csi-attacher:v4.0.0 + image: registry.k8s.io/sig-storage/csi-attacher:v4.5.0 args: - --v=5 - --csi-address=$(ADDRESS) diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml index 242c0b5aa..ef85e3170 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-resizer.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-resizer - image: registry.k8s.io/sig-storage/csi-resizer:v1.6.0 + image: registry.k8s.io/sig-storage/csi-resizer:v1.10.0 args: - "--v=5" - "--csi-address=$(ADDRESS)" diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml index f788a4a8f..6718060ec 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver-snapshotter.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-snapshotter - image: registry.k8s.io/sig-storage/csi-snapshotter:v6.1.0 + image: registry.k8s.io/sig-storage/csi-snapshotter:v7.0.1 args: - "--v=5" - "--csi-address=$(ADDRESS)" diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml index 4493deccc..3035532c5 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-driver.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-provisioner - image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 args: - "--csi-address=$(ADDRESS)" # Topology support is needed for the pod rescheduling test @@ -34,7 +34,7 @@ spec: - mountPath: /csi name: socket-dir - name: driver-registrar - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -53,7 +53,7 @@ spec: - mountPath: /registration name: registration-dir - name: mock - image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 args: - "--drivername=mock.storage.k8s.io" - "--nodeid=$(KUBE_NODE_NAME)" diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml 
b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml index d1aa8ece8..189c17af6 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/mock/csi-mock-proxy.yaml @@ -15,7 +15,7 @@ spec: serviceAccountName: csi-mock containers: - name: csi-provisioner - image: registry.k8s.io/sig-storage/csi-provisioner:v3.4.0 + image: registry.k8s.io/sig-storage/csi-provisioner:v4.0.0 args: - "--csi-address=$(ADDRESS)" # Topology support is needed for the pod rescheduling test @@ -35,7 +35,7 @@ spec: - mountPath: /csi name: socket-dir - name: driver-registrar - image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.5.1 + image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.10.0 args: - --v=5 - --csi-address=/csi/csi.sock @@ -53,7 +53,7 @@ spec: - mountPath: /registration name: registration-dir - name: mock - image: registry.k8s.io/sig-storage/hostpathplugin:v1.9.0 + image: registry.k8s.io/sig-storage/hostpathplugin:v1.13.0 args: - -v=5 - -nodeid=$(KUBE_NODE_NAME) diff --git a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh index ce60b39bc..122de0d18 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh +++ b/vendor/k8s.io/kubernetes/test/e2e/testing-manifests/storage-csi/update-hostpath.sh @@ -137,5 +137,5 @@ done grep -r image: hostpath/hostpath/csi-hostpath-plugin.yaml | while read -r image; do version=$(echo "$image" | sed -e 's/.*:\(.*\)/\1/') image=$(echo "$image" | sed -e 's/.*image: \([^:]*\).*/\1/') - sed -i -e "s;$image:.*;$image:$version;" mock/*.yaml + sed -i '' -e "s;$image:.*;$image:$version;" mock/*.yaml done diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go index 578a0c0f4..3d51bd5f9 100644 --- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go +++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go @@ -232,7 +232,7 @@ const ( func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config) { configs := map[ImageID]Config{} - configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.45"} + configs[Agnhost] = Config{list.PromoterE2eRegistry, "agnhost", "2.47"} configs[AgnhostPrivate] = Config{list.PrivateRegistry, "agnhost", "2.6"} configs[AuthenticatedAlpine] = Config{list.GcAuthenticatedRegistry, "alpine", "3.7"} configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"} @@ -241,8 +241,8 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"} configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"} configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"} - configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.7"} - configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.9-0"} + configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.5.6"} + configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.12-0"} configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"} configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"} configs[InvalidRegistryImage] = 
Config{list.InvalidRegistry, "alpine", "3.1"} diff --git a/vendor/k8s.io/mount-utils/mount_helper_unix.go b/vendor/k8s.io/mount-utils/mount_helper_unix.go index 82210aaa2..9193e7c8d 100644 --- a/vendor/k8s.io/mount-utils/mount_helper_unix.go +++ b/vendor/k8s.io/mount-utils/mount_helper_unix.go @@ -61,7 +61,7 @@ func IsCorruptedMnt(err error) bool { underlyingError = err } - return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES || underlyingError == syscall.EHOSTDOWN + return underlyingError == syscall.ENOTCONN || underlyingError == syscall.ESTALE || underlyingError == syscall.EIO || underlyingError == syscall.EACCES || underlyingError == syscall.EHOSTDOWN || underlyingError == syscall.EWOULDBLOCK } // MountInfo represents a single line in /proc/<pid>/mountinfo. diff --git a/vendor/k8s.io/mount-utils/mount_windows.go b/vendor/k8s.io/mount-utils/mount_windows.go index 02a963b1b..be714646e 100644 --- a/vendor/k8s.io/mount-utils/mount_windows.go +++ b/vendor/k8s.io/mount-utils/mount_windows.go @@ -150,12 +150,12 @@ func (mounter *Mounter) MountSensitive(source string, target string, fstype stri mklinkSource = mklinkSource + "\\" } - output, err := exec.Command("cmd", "/c", "mklink", "/D", target, mklinkSource).CombinedOutput() + err := os.Symlink(mklinkSource, target) if err != nil { - klog.Errorf("mklink failed: %v, source(%q) target(%q) output: %q", err, mklinkSource, target, string(output)) + klog.Errorf("symlink failed: %v, source(%q) target(%q)", err, mklinkSource, target) return err } - klog.V(2).Infof("mklink source(%q) on target(%q) successfully, output: %q", mklinkSource, target, string(output)) + klog.V(2).Infof("symlink source(%q) on target(%q) successfully", mklinkSource, target) return nil } @@ -219,8 +219,9 @@ func removeSMBMapping(remotepath string) (string, error) { func (mounter *Mounter) Unmount(target string) error { klog.V(4).Infof("Unmount target (%q)", target) target = NormalizeWindowsPath(target) - if output, err := exec.Command("cmd", "/c", "rmdir", target).CombinedOutput(); err != nil { - klog.Errorf("rmdir failed: %v, output: %q", err, string(output)) + + if err := os.Remove(target); err != nil { + klog.Errorf("removing directory %s failed: %v", target, err) return err } return nil diff --git a/vendor/modules.txt b/vendor/modules.txt index 31d49ff29..22fd92bbf 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -738,7 +738,7 @@ gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.1 ## explicit gopkg.in/yaml.v3 -# k8s.io/api v0.28.3 => k8s.io/api v0.28.3 +# k8s.io/api v0.28.12 => k8s.io/api v0.28.12 ## explicit; go 1.20 k8s.io/api/admission/v1 k8s.io/api/admission/v1beta1 @@ -794,12 +794,12 @@ k8s.io/api/scheduling/v1beta1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.28.3 +# k8s.io/apiextensions-apiserver v0.0.0 => k8s.io/apiextensions-apiserver v0.28.12 ## explicit; go 1.20 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 k8s.io/apiextensions-apiserver/pkg/features -# k8s.io/apimachinery v0.28.3 => k8s.io/apimachinery v0.28.3 +# k8s.io/apimachinery v0.28.12 => k8s.io/apimachinery v0.28.12 ## explicit; go 1.20 k8s.io/apimachinery/pkg/api/equality k8s.io/apimachinery/pkg/api/errors @@ -861,7 +861,7 @@ k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/netutil k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/apiserver v0.28.3 => k8s.io/apiserver v0.28.3 +# k8s.io/apiserver v0.28.12 => k8s.io/apiserver v0.28.12 ## explicit; go 1.20 k8s.io/apiserver/pkg/admission k8s.io/apiserver/pkg/admission/cel @@ -1006,7 +1006,7 @@ k8s.io/apiserver/plugin/pkg/audit/truncate k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook -# k8s.io/client-go v0.28.3 => k8s.io/client-go v0.28.3 +# k8s.io/client-go v0.28.12 => k8s.io/client-go v0.28.12 ## explicit; go 1.20 k8s.io/client-go/applyconfigurations/admissionregistration/v1 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1 @@ -1327,7 +1327,7 @@ k8s.io/client-go/util/homedir k8s.io/client-go/util/keyutil k8s.io/client-go/util/retry k8s.io/client-go/util/workqueue -# k8s.io/cloud-provider v0.28.3 => k8s.io/cloud-provider v0.28.3 +# k8s.io/cloud-provider v0.28.12 => k8s.io/cloud-provider v0.28.12 ## explicit; go 1.20 k8s.io/cloud-provider k8s.io/cloud-provider/api @@ -1346,7 +1346,7 @@ k8s.io/cloud-provider/service/helpers k8s.io/cloud-provider/volume k8s.io/cloud-provider/volume/errors k8s.io/cloud-provider/volume/helpers -# k8s.io/component-base v0.28.3 => k8s.io/component-base v0.28.3 +# k8s.io/component-base v0.28.12 => k8s.io/component-base v0.28.12 ## explicit; go 1.20 k8s.io/component-base/cli/flag k8s.io/component-base/config @@ -1368,13 +1368,13 @@ k8s.io/component-base/metrics/testutil k8s.io/component-base/tracing k8s.io/component-base/tracing/api/v1 k8s.io/component-base/version -# k8s.io/component-helpers v0.28.3 => k8s.io/component-helpers v0.28.3 +# k8s.io/component-helpers v0.28.12 => k8s.io/component-helpers v0.28.12 ## explicit; go 1.20 k8s.io/component-helpers/node/util k8s.io/component-helpers/scheduling/corev1 k8s.io/component-helpers/scheduling/corev1/nodeaffinity k8s.io/component-helpers/storage/volume -# k8s.io/controller-manager v0.28.3 => k8s.io/controller-manager v0.28.3 +# k8s.io/controller-manager v0.28.12 => k8s.io/controller-manager v0.28.12 ## explicit; go 1.20 k8s.io/controller-manager/config k8s.io/controller-manager/config/v1 @@ -1394,7 +1394,7 @@ k8s.io/klog/v2/internal/clock k8s.io/klog/v2/internal/dbg k8s.io/klog/v2/internal/serialize k8s.io/klog/v2/internal/severity -# k8s.io/kms v0.28.3 +# k8s.io/kms v0.28.12 ## explicit; go 1.20 k8s.io/kms/apis/v1beta1 k8s.io/kms/apis/v2 @@ -1422,15 +1422,15 @@ k8s.io/kube-openapi/pkg/validation/errors k8s.io/kube-openapi/pkg/validation/spec k8s.io/kube-openapi/pkg/validation/strfmt k8s.io/kube-openapi/pkg/validation/strfmt/bson -# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.28.3 +# k8s.io/kubectl v0.0.0 => k8s.io/kubectl v0.28.12 ## explicit; go 1.20 k8s.io/kubectl/pkg/scale k8s.io/kubectl/pkg/util/podutils -# k8s.io/kubelet v0.28.3 => k8s.io/kubelet v0.28.3 +# k8s.io/kubelet v0.28.12 => k8s.io/kubelet v0.28.12 ## explicit; go 1.20 k8s.io/kubelet/pkg/apis k8s.io/kubelet/pkg/apis/stats/v1alpha1 -# k8s.io/kubernetes v1.28.4 +# k8s.io/kubernetes v1.28.12 ## explicit; go 1.20 k8s.io/kubernetes/pkg/api/legacyscheme k8s.io/kubernetes/pkg/api/service @@ -1496,10 +1496,10 @@ k8s.io/kubernetes/test/utils k8s.io/kubernetes/test/utils/format k8s.io/kubernetes/test/utils/image k8s.io/kubernetes/test/utils/kubeconfig -# k8s.io/mount-utils v0.28.3 => k8s.io/mount-utils v0.28.3 +# k8s.io/mount-utils v0.28.12 => k8s.io/mount-utils v0.28.12 ## explicit; go 1.20 k8s.io/mount-utils -# 
k8s.io/pod-security-admission v0.28.3 => k8s.io/pod-security-admission v0.28.3 +# k8s.io/pod-security-admission v0.28.12 => k8s.io/pod-security-admission v0.28.12 ## explicit; go 1.20 k8s.io/pod-security-admission/api k8s.io/pod-security-admission/policy @@ -1628,32 +1628,32 @@ sigs.k8s.io/structured-merge-diff/v4/value # sigs.k8s.io/yaml v1.3.0 ## explicit; go 1.12 sigs.k8s.io/yaml -# k8s.io/api => k8s.io/api v0.28.3 -# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.3 -# k8s.io/apimachinery => k8s.io/apimachinery v0.28.3 -# k8s.io/apiserver => k8s.io/apiserver v0.28.3 -# k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.3 -# k8s.io/client-go => k8s.io/client-go v0.28.3 -# k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.3 -# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.3 -# k8s.io/code-generator => k8s.io/code-generator v0.28.3 -# k8s.io/component-base => k8s.io/component-base v0.28.3 -# k8s.io/component-helpers => k8s.io/component-helpers v0.28.3 -# k8s.io/controller-manager => k8s.io/controller-manager v0.28.3 -# k8s.io/cri-api => k8s.io/cri-api v0.28.3 -# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.3 -# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.3 -# k8s.io/endpointslice => k8s.io/endpointslice v0.28.3 -# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.3 -# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.3 -# k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.3 -# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.3 -# k8s.io/kubectl => k8s.io/kubectl v0.28.3 -# k8s.io/kubelet => k8s.io/kubelet v0.28.3 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.3 -# k8s.io/metrics => k8s.io/metrics v0.28.3 -# k8s.io/mount-utils => k8s.io/mount-utils v0.28.3 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.3 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.3 -# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.28.3 -# k8s.io/sample-controller => k8s.io/sample-controller v0.28.3 +# k8s.io/api => k8s.io/api v0.28.12 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.12 +# k8s.io/apimachinery => k8s.io/apimachinery v0.28.12 +# k8s.io/apiserver => k8s.io/apiserver v0.28.12 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.28.12 +# k8s.io/client-go => k8s.io/client-go v0.28.12 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.28.12 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.28.12 +# k8s.io/code-generator => k8s.io/code-generator v0.28.12 +# k8s.io/component-base => k8s.io/component-base v0.28.12 +# k8s.io/component-helpers => k8s.io/component-helpers v0.28.12 +# k8s.io/controller-manager => k8s.io/controller-manager v0.28.12 +# k8s.io/cri-api => k8s.io/cri-api v0.28.12 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.28.12 +# k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.28.12 +# k8s.io/endpointslice => k8s.io/endpointslice v0.28.12 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.28.12 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.28.12 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.28.12 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.28.12 +# k8s.io/kubectl => k8s.io/kubectl v0.28.12 +# k8s.io/kubelet => k8s.io/kubelet v0.28.12 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.28.12 +# k8s.io/metrics => k8s.io/metrics v0.28.12 +# k8s.io/mount-utils => k8s.io/mount-utils v0.28.12 +# 
k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.28.12 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.28.12 +# k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.28.12 +# k8s.io/sample-controller => k8s.io/sample-controller v0.28.12
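Of the Go-level behavior changes in this bump, the mount-utils hunk above is the easiest to exercise in isolation: IsCorruptedMnt now also classifies EWOULDBLOCK as a corrupted-mount errno, alongside ENOTCONN, ESTALE, EIO, EACCES, and EHOSTDOWN. A minimal sketch, assuming the bumped k8s.io/mount-utils resolves through this module's replace directives (the path /mnt/example is purely illustrative):

package main

import (
	"fmt"
	"os"
	"syscall"

	mount "k8s.io/mount-utils"
)

func main() {
	// A stat() against a wedged mount typically surfaces as an *os.PathError
	// wrapping an errno; IsCorruptedMnt unwraps it and checks the errno set.
	err := &os.PathError{Op: "stat", Path: "/mnt/example", Err: syscall.EWOULDBLOCK}
	fmt.Println(mount.IsCorruptedMnt(err)) // true with the v0.28.12 mount-utils

	// Errnos outside the recognized set are still not treated as corruption.
	err.Err = syscall.ENOENT
	fmt.Println(mount.IsCorruptedMnt(err)) // false
}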